GB_reduce_each_index.c
//------------------------------------------------------------------------------
// GB_reduce_each_index: T(i)=reduce(A(i,:)), reduce a matrix to a vector
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// Reduce a matrix to a vector.  All entries in A(i,:) are reduced to T(i).
// First, all threads reduce their slice to their own workspace, operating on
// roughly the same number of entries each.  The vectors in A are ignored; the
// reduction only depends on the indices.  Next, the threads cooperate to
// reduce all workspaces to the workspace of thread 0.  Finally, this last
// workspace is collected into T.

{

    //--------------------------------------------------------------------------
    // get A
    //--------------------------------------------------------------------------

    const GB_ATYPE *restrict Ax = A->x ;
    const int64_t  *restrict Ai = A->i ;
    const int64_t n = A->vlen ;
    size_t zsize = ttype->size ;

    //--------------------------------------------------------------------------
    // allocate workspace for each thread
    //--------------------------------------------------------------------------

    GB_CTYPE *Works [nth] ;
    bool *Marks [nth] ;
    bool ok = true ;

    // This does not need to be parallel.  The calloc does not take O(n) time.
    for (int tid = 0 ; tid < nth ; tid++)
    {
        GB_MALLOC_MEMORY (Works [tid], n, zsize) ;
        GB_CALLOC_MEMORY (Marks [tid], n, sizeof (bool)) ;
        ok = ok && (Works [tid] != NULL && Marks [tid] != NULL) ;
    }

    if (!ok)
    {
        // out of memory
        for (int tid = 0 ; tid < nth ; tid++)
        {
            GB_FREE_MEMORY (Works [tid], n, zsize) ;
            GB_FREE_MEMORY (Marks [tid], n, sizeof (bool)) ;
        }
        return (GB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // reduce each slice in its own workspace
    //--------------------------------------------------------------------------

    int64_t Tnz [nth] ;

    // each thread reduces its own slice in parallel
    #pragma omp parallel for num_threads(nth) schedule(static)
    for (int tid = 0 ; tid < nth ; tid++)
    {

        //----------------------------------------------------------------------
        // get the workspace for this thread
        //----------------------------------------------------------------------

        GB_CTYPE *restrict Work = Works [tid] ;
        bool *restrict Mark = Marks [tid] ;
        int64_t my_tnz = 0 ;

        //----------------------------------------------------------------------
        // reduce the entries
        //----------------------------------------------------------------------

        for (int64_t p = pstart_slice [tid] ; p < pstart_slice [tid+1] ; p++)
        {
            int64_t i = Ai [p] ;
            // ztype aij = (ztype) Ax [p], with typecast
            GB_SCALAR (aij) ;
            GB_CAST_ARRAY_TO_SCALAR (aij, Ax, p) ;
            if (!Mark [i])
            {
                // first time index i has been seen
                // Work [i] = aij ; no typecast
                GB_COPY_SCALAR_TO_ARRAY (Work, i, aij) ;
                Mark [i] = true ;
                my_tnz++ ;
            }
            else
            {
                // Work [i] += aij ; no typecast
                GB_ADD_SCALAR_TO_ARRAY (Work, i, aij) ;
            }
        }
        Tnz [tid] = my_tnz ;
    }

    //--------------------------------------------------------------------------
    // reduce all workspace to Work [0] and count # entries in T
    //--------------------------------------------------------------------------

    GB_CTYPE *restrict Work0 = Works [0] ;
    bool *restrict Mark0 = Marks [0] ;
    int64_t tnz = Tnz [0] ;

    if (nth > 1)
    {
        #pragma omp parallel for num_threads(nthreads) schedule(static) \
            reduction(+:tnz)
        for (int64_t i = 0 ; i < n ; i++)
        {
            for (int tid = 1 ; tid < nth ; tid++)
            {
                const bool *restrict Mark = Marks [tid] ;
                if (Mark [i])
                {
                    // thread tid has a contribution to index i
                    const GB_CTYPE *restrict Work = Works [tid] ;
                    if (!Mark0 [i])
                    {
                        // first time index i has been seen
                        // Work0 [i] = Work [i] ; no typecast
                        GB_COPY_ARRAY_TO_ARRAY (Work0, i, Work, i) ;
                        Mark0 [i] = true ;
                        tnz++ ;
                    }
                    else
                    {
                        // Work0 [i] += Work [i] ; no typecast
                        GB_ADD_ARRAY_TO_ARRAY (Work0, i, Work, i) ;
                    }
                }
            }
        }

        // free all but workspace for thread 0
        for (int tid = 1 ; tid < nth ; tid++)
        {
            GB_FREE_MEMORY (Works [tid], n, zsize) ;
            GB_FREE_MEMORY (Marks [tid], n, sizeof (bool)) ;
        }
    }

    //--------------------------------------------------------------------------
    // allocate T
    //--------------------------------------------------------------------------

    // since T is a GrB_Vector, it is CSC and not hypersparse
    GB_CREATE (&T, ttype, n, 1, GB_Ap_calloc, true,
        GB_FORCE_NONHYPER, GB_HYPER_DEFAULT, 1, tnz, true, Context) ;
    if (info != GrB_SUCCESS)
    {
        // out of memory
        GB_FREE_MEMORY (Works [0], n, zsize) ;
        GB_FREE_MEMORY (Marks [0], n, sizeof (bool)) ;
        return (GB_OUT_OF_MEMORY) ;
    }

    T->p [0] = 0 ;
    T->p [1] = tnz ;
    int64_t *restrict Ti = T->i ;
    GB_CTYPE *restrict Tx = T->x ;
    T->nvec_nonempty = (tnz > 0) ? 1 : 0 ;

    //--------------------------------------------------------------------------
    // gather the results into T
    //--------------------------------------------------------------------------

    if (tnz == n)
    {

        //----------------------------------------------------------------------
        // T is dense: transplant Work0 into T->x
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int64_t i = 0 ; i < n ; i++)
        {
            Ti [i] = i ;
        }
        GB_FREE_MEMORY (T->x, n, zsize) ;
        T->x = Work0 ;
        Work0 = NULL ;
    }
    else
    {

        //----------------------------------------------------------------------
        // T is sparse: gather from Work0 and Mark0
        //----------------------------------------------------------------------

        if (nthreads == 1)
        {

            //------------------------------------------------------------------
            // gather sparse T using a single thread
            //------------------------------------------------------------------

            int64_t p = 0 ;
            for (int64_t i = 0 ; i < n ; i++)
            {
                if (Mark0 [i])
                {
                    Ti [p] = i ;
                    // Tx [p] = Work0 [i], no typecast
                    GB_COPY_ARRAY_TO_ARRAY (Tx, p, Work0, i) ;
                    p++ ;
                }
            }
            ASSERT (p == tnz) ;
        }
        else
        {

            //------------------------------------------------------------------
            // gather sparse T using multiple threads
            //------------------------------------------------------------------

            // Some tasks may be completely empty and thus take no time at all;
            // 256 tasks per thread are created for better load balancing.
            int ntasks = 256 * nthreads ;
            ntasks = GB_IMIN (ntasks, n) ;
            int64_t Count [ntasks+1] ;

            #pragma omp parallel for num_threads(nthreads) schedule(dynamic)
            for (int taskid = 0 ; taskid < ntasks ; taskid++)
            {
                int64_t ifirst, ilast, p = 0 ;
                GB_PARTITION (ifirst, ilast, n, taskid, ntasks) ;
                for (int64_t i = ifirst ; i < ilast ; i++)
                {
                    p += Mark0 [i] ;
                }
                Count [taskid] = p ;
            }

            GB_cumsum (Count, ntasks, NULL, 1) ;

            #pragma omp parallel for num_threads(nthreads) schedule(dynamic)
            for (int64_t taskid = 0 ; taskid < ntasks ; taskid++)
            {
                int64_t ifirst, ilast, p = Count [taskid] ;
                int64_t my_count = (Count [taskid+1] - p) ;
                GB_PARTITION (ifirst, ilast, n, taskid, ntasks) ;
                if (my_count > 0)
                {
                    for (int64_t i = ifirst ; i < ilast ; i++)
                    {
                        if (Mark0 [i])
                        {
                            Ti [p] = i ;
                            // Tx [p] = Work0 [i], no typecast
                            GB_COPY_ARRAY_TO_ARRAY (Tx, p, Work0, i) ;
                            p++ ;
                        }
                    }
                }
            }

            #ifdef GB_DEBUG
            // check result using a single thread
            int64_t p = 0 ;
            for (int64_t i = 0 ; i < n ; i++)
            {
                if (Mark0 [i])
                {
                    ASSERT (Ti [p] == i) ;
                    p++ ;
                }
            }
            ASSERT (p == tnz) ;
            #endif
        }
    }

    //--------------------------------------------------------------------------
    // free workspace for thread 0
    //--------------------------------------------------------------------------

    GB_FREE_MEMORY (Work0, n, zsize) ;
    GB_FREE_MEMORY (Mark0, n, sizeof (bool)) ;
}
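The template above is driven by GB_* macros supplied by the file that includes it. As a rough illustration of the same two-phase pattern (each thread reduces its slice of entries into a private workspace keyed by row index, then the workspaces are merged over the indices), here is a minimal self-contained sketch assuming plain doubles and the "+" monoid; every name in it (reduce_by_index, present, and so on) is hypothetical and not part of the GraphBLAS API.

/* reduce_by_index_demo.c -- illustrative sketch only.  Plain doubles and the
   "+" monoid stand in for the GB_* macros above; all names are hypothetical.
   Compile with, e.g.: cc -fopenmp reduce_by_index_demo.c */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Reduce value [p] into out [index [p]] for all p: phase 1 gives each thread a
   private workspace over the full index range, phase 2 merges the workspaces. */
static void reduce_by_index (double *out, bool *present, const long *index,
    const double *value, long nvals, long n, int nth)
{
    double *Works = calloc ((size_t) nth * (size_t) n, sizeof (double)) ;
    bool   *Marks = calloc ((size_t) nth * (size_t) n, sizeof (bool)) ;
    if (Works == NULL || Marks == NULL) { free (Works) ; free (Marks) ; return ; }

    /* phase 1: each thread reduces its slice of the entries into its workspace */
    #pragma omp parallel for num_threads(nth) schedule(static)
    for (int tid = 0 ; tid < nth ; tid++)
    {
        double *Work = Works + (size_t) tid * (size_t) n ;
        bool   *Mark = Marks + (size_t) tid * (size_t) n ;
        long pstart = (nvals * tid) / nth ;
        long pend   = (nvals * (tid + 1)) / nth ;
        for (long p = pstart ; p < pend ; p++)
        {
            long i = index [p] ;
            Work [i] = Mark [i] ? (Work [i] + value [p]) : value [p] ;
            Mark [i] = true ;
        }
    }

    /* phase 2: merge all workspaces into out, parallel over the indices */
    #pragma omp parallel for num_threads(nth) schedule(static)
    for (long i = 0 ; i < n ; i++)
    {
        for (int tid = 0 ; tid < nth ; tid++)
        {
            if (Marks [(size_t) tid * (size_t) n + i])
            {
                double w = Works [(size_t) tid * (size_t) n + i] ;
                out [i] = present [i] ? (out [i] + w) : w ;
                present [i] = true ;
            }
        }
    }

    free (Works) ;
    free (Marks) ;
}

int main (void)
{
    long   index [6] = { 0, 2, 2, 1, 0, 2 } ;
    double value [6] = { 1, 2, 3, 4, 5, 6 } ;
    double out [3] = { 0, 0, 0 } ;
    bool   present [3] = { false, false, false } ;
    reduce_by_index (out, present, index, value, 6, 3, 2) ;
    for (long i = 0 ; i < 3 ; i++)
    {
        printf ("T(%ld) = %g (%s)\n", i, out [i],
            present [i] ? "present" : "empty") ;
    }
    return (0) ;
}

With index = {0,2,2,1,0,2} and value = {1,...,6}, the sketch prints T(0) = 6, T(1) = 4, T(2) = 11, i.e. a row-wise sum, which is the role the per-thread workspaces and the merge play in the template above.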
ast-dump-openmp-target-simd.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp target simd for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp target simd for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp target simd collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp target simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp target simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1> // CHECK-NEXT: | `-OMPTargetSimdDirective {{.*}} <line:4:9, col:24> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:4:9) *const restrict' // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 
'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:4:9) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:4:9) *const restrict' // CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1> // CHECK-NEXT: | `-OMPTargetSimdDirective {{.*}} <line:10:9, col:24> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | 
`-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:10:9) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:10:9) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:10:9) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1> // CHECK-NEXT: | `-OMPTargetSimdDirective {{.*}} <line:17:9, 
col:36> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:25, col:35> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:34> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:34> 'int' 1 // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:17:9) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 
'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:17:9) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:17:9) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr 
{{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1> // CHECK-NEXT: | `-OMPTargetSimdDirective {{.*}} <line:24:9, col:36> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:25, col:35> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:34> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:34> 'int' 2 // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:24:9) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} 
<col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:24:9) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:24:9) *const restrict' // CHECK-NEXT: | | |-VarDecl 
{{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1> // CHECK-NEXT: `-OMPTargetSimdDirective {{.*}} <line:31:9, col:36> // CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:25, col:35> // CHECK-NEXT: | `-ConstantExpr {{.*}} <col:34> 'int' // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:34> 'int' 2 // CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> 
openmp_structured_block // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:31:9) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 
'void *const' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:31:9) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> 
col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:31:9) *const restrict' // CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
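For reference, an AST dump in the same format as the CHECK lines above can also be reproduced outside of lit by going through the clang driver; the RUN line at the top of the file is the authoritative cc1 invocation, and the driver-level spelling below is only an assumed equivalent for manual inspection.

clang -fsyntax-only -fopenmp -Xclang -ast-dump ast-dump-openmp-target-simd.c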
shape.h
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ /* * shape.h * * Created on: Dec 28, 2015 * Author: agibsonccc */ #ifndef SHAPE_H_ #define SHAPE_H_ #include <cstring> #include <cstdio> #include "../dll.h" #include "../nd4jmalloc.h" #include "../templatemath.h" #include "../helpers/logger.h" #include "../pointercast.h" #include "../cnpy/cnpy.h" #include <op_boilerplate.h> #define MAX_DIMENSION 0x7fffffff #define MAX_NUM_THREADS 1024 #define MAX_RANK 32 #define MAX_SHAPEINFOLENGTH 2*MAX_RANK+4 #define MAX_COORD 3 #define PREALLOC_SIZE 33554432 #ifdef __CUDACC__ #include <cuda.h> #include <cuda_runtime.h> #include <helpers/sharedmem.h> #endif #ifdef __CUDACC__ #define INLINEDEF inline #else #define INLINEDEF inline #endif #include "../pairwise_util.h" #include <stdint.h> #include <array/ArrayOptions.h> typedef unsigned int uint; namespace shape { /** * Shape information approximating * the information on an ndarray */ struct ND4J_EXPORT ShapeInformation { _CUDA_HD ShapeInformation(Nd4jLong *shape_ = nullptr, Nd4jLong *stride_ = nullptr, char order_ = 0, int rank_ = 0, int offset_ = 0, int elementWiseStride_ = 0) : shape(shape_), stride(stride_), order(order_), rank(rank_), offset(offset_), elementWiseStride(elementWiseStride_) {} Nd4jLong *shape; Nd4jLong *stride; char order; int rank; int offset; int elementWiseStride; }; /** * Indexing information * for bounds checking */ struct ND4J_EXPORT CurrentIndexing { int numElementsPerThread; int blockStartingIndex; int startingThreadIndex; int endingThreadIndex; }; ND4J_EXPORT _CUDA_HD bool shapeEquals(const int shape1Rank, const Nd4jLong *shape1, const int shape2Rank, const Nd4jLong *shape2); ND4J_EXPORT _CUDA_HD Nd4jLong* detachShape(Nd4jLong *originalShape); ND4J_EXPORT _CUDA_HD Nd4jLong* copyShape(Nd4jLong *originalShape); ND4J_EXPORT _CUDA_HD bool shapeEquals(const Nd4jLong *shapeInfo1, const Nd4jLong *shapeInfo2); ND4J_EXPORT _CUDA_HD bool strideEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2); ND4J_EXPORT _CUDA_HD bool strideEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2); ND4J_EXPORT _CUDA_HD bool strideEquals(Nd4jLong *stride1,int rank1,Nd4jLong *stride2,int rank2); ND4J_EXPORT _CUDA_HD bool equalsSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB); ND4J_EXPORT _CUDA_HD bool equalsTypesAndShapesSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB); ND4J_EXPORT _CUDA_HD bool equalsStrict(const Nd4jLong *shapeA, const Nd4jLong *shapeB); ND4J_EXPORT _CUDA_HD bool haveSameOffsets(const Nd4jLong *shapeA, const Nd4jLong *shapeB); ND4J_EXPORT _CUDA_HD int sizeAt(const Nd4jLong *shape, const int dim); template <typename T> ND4J_EXPORT _CUDA_HD void fill(T* buffer, T value, Nd4jLong length); ND4J_EXPORT _CUDA_HD void traceNew(int id); ND4J_EXPORT _CUDA_HD int tadIndexForLinear(int linearIndex, int tadLength); ND4J_EXPORT _CUDA_HD int 
tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength); ND4J_EXPORT _CUDA_HD bool canReshape(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShape, bool isFOrder); ND4J_EXPORT _CUDA_HD bool reshapeC(const int oldRank, const Nd4jLong* oldShapeInfo, const int newRank, const Nd4jLong* newShape, Nd4jLong* newShapeInfo); /** * Get the shape info buffer * for the given rank and shape. */ ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape); ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *buffer); /** * Get the shape info buffer * for the given rank and shape. */ ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape); ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *output); //ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange, Nd4jLong *tmpBuffer); ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(int rank, Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer); #ifdef __CUDACC__ template <typename T> __device__ ND4J_EXPORT Nd4jLong *cuMalloc(Nd4jLong *buffer, long size, UnifiedSharedMemory *manager); __device__ ND4J_EXPORT Nd4jLong *cuMalloc(Nd4jLong *buffer, long size); #endif /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ ND4J_EXPORT _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank); ND4J_EXPORT _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, Nd4jLong* ret); /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank); ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, Nd4jLong* ret); ND4J_EXPORT _CUDA_HD void updateStrides(Nd4jLong *shape, const char order); ND4J_EXPORT _CUDA_HD void updateStrides(const int rank, const Nd4jLong *shapeOnly, Nd4jLong *stridesOnly, const char order); // check whether input dimensions are permuted, not permuted dimensions order have to be 0,....,rank-1 template <typename T> ND4J_EXPORT _CUDA_HD bool isDimPermuted(const T* dimensions, const int dimSize); /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ ND4J_EXPORT _CUDA_HD Nd4jLong* calcStridesFortran(Nd4jLong *shape, int rank, int startNum); ND4J_EXPORT _CUDA_HD Nd4jLong* calcStridesFortran(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret); /** * Computes the standard packed array strides for a given shape. 
* * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, int startNum); ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret); /** * @param toCopy the shape to copy * @return a copy of the original struct */ ND4J_EXPORT _CUDA_HD ShapeInformation *shapeCopy( ShapeInformation *toCopy); ND4J_EXPORT _CUDA_HD bool strideDescendingCAscendingF(const Nd4jLong *shapeBuffer); ND4J_EXPORT _CUDA_HD bool isStrideSimple(const Nd4jLong* shapeInfo); /** * copy-past from java hasDefaultStridesForShape function * check whether array is not permuted and has contiguous elements in memory */ ND4J_EXPORT _CUDA_HD bool areStridesDefault(const Nd4jLong* shapeInfo); /** * Compute the element wise stride * for a given shape/stride configuration * @param rank the rank of the shape/stride * @param shape the shape * @param stride the stride * @param isFOrder 0 or 1 for whether the array is f * ordered or not * @return 0 if there is no element wise stride the * element wise stride of reshape(1,length) otherwise */ ND4J_EXPORT _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder); /** * Compute the element wise stride * for a given shape/stride configuration * @param rank the rank of the shape/stride * @param shape the shape * @param stride the stride * @param isFOrder 0 or 1 for whether the array is f * ordered or not * @return 0 if there is no element wise stride the * element wise stride of reshape(1,length) otherwise */ ND4J_EXPORT _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder, Nd4jLong *dimension, int dimensionLength); ND4J_EXPORT _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride); ND4J_EXPORT _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride, Nd4jLong *buffer); /** * * @param length * @param shape * @param rearrange * @return */ ND4J_EXPORT _CUDA_HD Nd4jLong *doPermuteSwap(int length, Nd4jLong *shape, int* rearrange); /** * In place permute swap * @param length * @param shape * @param rearrange */ ND4J_EXPORT _CUDA_HD void doPermuteSwap(int length, Nd4jLong **shape, int* rearrange); ND4J_EXPORT _CUDA_HD Nd4jLong *permuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange); ND4J_EXPORT _CUDA_HD void permuteShapeBufferInPlace(Nd4jLong *shapeBuffer, int* rearrange, Nd4jLong *out); ND4J_EXPORT _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeBuffer, const int *rearrange); ND4J_EXPORT _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeBuffer, const Nd4jLong *rearrange); ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange); ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(int rank,Nd4jLong *shapeBuffer, int* rearrange); /** * Rearrange the permute indexes * according to which dimensions are specified. * * For example, dimension is implicitly: * 0,1,2 * * If you want to do a reduce along dimensions 0 and 1, * you need to permute the indexes to be: * 2,0,1 * * which will give us the ability to ierate along an element * wise stride. 
*/ ND4J_EXPORT _CUDA_HD Nd4jLong* createPermuteIndexes(int originalRank, int *dimension,int dimensionLength); ND4J_EXPORT _CUDA_HD Nd4jLong* computeResultShape(Nd4jLong *originalShapeBuffer, int *dimension,int dimensionLength); /** * This method does inplace transpose of given shapeBuffer * * @param shapeBuffer */ ND4J_EXPORT _CUDA_HD void transposeInplace(Nd4jLong *shapeBuffer); /** * Get the ordering for the device * @param length * @param shape * @param stride * @param elementStride * @return */ ND4J_EXPORT _CUDA_HD char getOrder(int length, Nd4jLong *shape, Nd4jLong *stride, int elementStride); /** * Ensure that every value in the re arrange * array is unique * @param arr * @param shape * @param arrLength * @param shapeLength * @return */ template <typename T> ND4J_EXPORT _CUDA_HD int checkArrangeArray(T *arr, int arrLength, int shapeLength); /** * Permute the shape information * @param info the shape information to permute * @param rearrange the order to re arrange * @param rank the rank of the rearrange array */ ND4J_EXPORT _CUDA_HD void permute(ShapeInformation **info, int *rearrange, int rank); /** * Returns whether the * given shape is a vector or not * @param shape the shape of the array * @param rank the rank of cthe shape */ ND4J_EXPORT _CUDA_HD int isVector(Nd4jLong *shape, int rank); /** * When 1 dimension is the whole length of the * array */ ND4J_EXPORT _CUDA_HD int oneDimEqualToLength(Nd4jLong *shape, int rank); ND4J_EXPORT _CUDA_HD int oneDimEqualToLength(Nd4jLong *shapeInfo); ND4J_EXPORT _CUDA_HD int isVector(const Nd4jLong *shapeInfo); ND4J_EXPORT _CUDA_HD bool isLikeVector(Nd4jLong *shapeInfo, int& posOfNonUnityDim); ND4J_EXPORT _CUDA_HD bool isCommonVector(const Nd4jLong *shapeInfo, int& posOfNonUnityDim); ND4J_EXPORT _CUDA_HD bool isRowVector(const Nd4jLong *shapeInfo); ND4J_EXPORT _CUDA_HD bool isColumnVector(Nd4jLong *shapeInfo); /** * Returns whether the * given shape is a vector or not * @param shape the shape of the array * @param rank the rank of the shape */ ND4J_EXPORT _CUDA_HD int isMatrix(Nd4jLong *shape, int rank); INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shapeInfo); /** * Returns the shape portion of an information * buffer */ ND4J_EXPORT _CUDA_HD Nd4jLong *shapeOf(Nd4jLong *buffer); /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. */ template <typename T> ND4J_EXPORT _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy); template <typename T> ND4J_EXPORT _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy, T *ret); /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. */ template <typename T> ND4J_EXPORT _CUDA_HD void copyTo(Nd4jLong length, T *from, T *to); /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. 
*/ ND4J_EXPORT _CUDA_HD void copyTo(int length, Nd4jLong *from, Nd4jLong *to, Nd4jLong *indexes); /** * Permute the given strides * in the given rearrange order * @param toPermute the buffer to permute * @param shapeRank the length of the buffer to permute * @param rearrange the rearrange order (must be 0 based indexes * and all must be filled in) * @return the rearranged array */ //ND4J_EXPORT _CUDA_HD Nd4jLong *permutedStrides(Nd4jLong *toPermute, int shapeRank, Nd4jLong *rearrange); /** * Return the slice (shape + 1 in pointer arithmetic) * @param shape the shape to take the slice of * @return the shape array - the first entry */ ND4J_EXPORT _CUDA_HD Nd4jLong *slice(Nd4jLong *shape); ND4J_EXPORT _CUDA_HD int slices(Nd4jLong *shapeBuffer); ND4J_EXPORT _CUDA_HD Nd4jLong *sliceOfShapeBuffer(Nd4jLong sliceIdx, Nd4jLong *shapeBuffer); /** * Returns the length of the * shape information buffer: * rank * 2 + 3 * @param rank the rank to get the shape * info length for * @return rank * 2 + 4 */ ND4J_EXPORT _CUDA_HD int shapeInfoLength(int rank); ND4J_EXPORT _CUDA_HD int shapeInfoLength(Nd4jLong* shapeInfo); ND4J_EXPORT _CUDA_HD int shapeInfoLength(const Nd4jLong* shapeInfo); ND4J_EXPORT _CUDA_HD size_t shapeInfoByteLength(int rank); ND4J_EXPORT _CUDA_HD size_t shapeInfoByteLength(const Nd4jLong* shapeInfo); ND4J_EXPORT _CUDA_HD size_t shapeInfoByteLength(const Nd4jLong* shapeInfo); /** * Returns the rank portion of * an information buffer */ ND4J_EXPORT _CUDA_HD int rank(const Nd4jLong *buffer); ND4J_EXPORT _CUDA_HD int rank(const int *buffer); ND4J_EXPORT _CUDA_HD int rank(const unsigned int *buffer); // returns pointer on elementWiseStride ND4J_EXPORT _CUDA_HD Nd4jLong* ews(Nd4jLong* shapeInfo); /** * Converts a raw int buffer of the layout: * rank * shape * stride * offset * elementWiseStride * * where shape and stride are both straight int pointers */ ND4J_EXPORT _CUDA_HD ShapeInformation *infoFromBuffer(Nd4jLong *buffer); /** * Returns the stride portion of an information * buffer */ ND4J_EXPORT _CUDA_HD Nd4jLong *stride(const Nd4jLong *buffer); /** * Compute the length of the given shape */ ND4J_EXPORT _CUDA_HD bool isEmpty(const Nd4jLong *shapeInfo); ND4J_EXPORT _CUDA_HD Nd4jLong length(const Nd4jLong *shapeInfo); ND4J_EXPORT _CUDA_HD Nd4jLong length(std::initializer_list<int>& shape); ND4J_EXPORT _CUDA_HD Nd4jLong length(std::initializer_list<Nd4jLong>& shape); /*** * Returns the offset portion of an information buffer */ ND4J_EXPORT _CUDA_HD Nd4jLong offset(Nd4jLong *buffer); ND4J_EXPORT _CUDA_HD Nd4jLong& extra(Nd4jLong *buffer); /** * Returns the ordering * for this shape information buffer */ ND4J_EXPORT _CUDA_HD char order(const Nd4jLong *buffer); /** * Returns the type */ ND4J_EXPORT _CUDA_HD Nd4jLong type(const Nd4jLong* shapeInfo); /** * Returns the element wise stride for this information * buffer */ ND4J_EXPORT _CUDA_HD Nd4jLong elementWiseStride(const Nd4jLong *buffer); /** * Returns the element wise stride for this information * buffer * relative to a dimension and ordering for a reduction index */ ND4J_EXPORT _CUDA_HD Nd4jLong reductionIndexElementWiseStride(Nd4jLong *buffer, int *dimension, int dimensionLength); /** * Returns whether * the given shape info buffer * represents a scalar shape */ ND4J_EXPORT _CUDA_HD int isScalar(Nd4jLong *info); /** * Returns whether * the given shape information * represents a scalar * shape or not */ ND4J_EXPORT _CUDA_HD int isScalar(volatile ShapeInformation *info); /** * Return a copy of this array with the * given index omitted * * 
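 * For example (illustrative): removing indexes {1} from data {12, 4, 2, 1}
 * produces {12, 2, 1}.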
@param data the data to copy * @param indexes the index of the item to remove * @param dataLength the length of the data array * @param indexesLength the length of the data array * @return the new array with the omitted * * item */ template <typename T1, typename T2> ND4J_EXPORT _CUDA_HD void removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength, T1 *out); /** * Return a copy of this array with the * given index omitted * * @param data the data to copy * @param indexes the index of the item to remove * @param dataLength the length of the data array * @param indexesLength the length of the data array * @return the new array with the omitted * * item */ template <typename T1, typename T2> ND4J_EXPORT _CUDA_HD T1* removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength); /** * Iterate over a given set of indexes * the begin and end indexes are 0 based. * 1 padding is automatically assumed for the ending. * * For example if you want to iterate over 0 to 4 * it will go to 4 rather than 3. * * indexes should be the indexes to exclude * indexes length should be the length of indexes */ ND4J_EXPORT _CUDA_HD Nd4jLong* everyIndexBut(Nd4jLong *indexes,int indexesLength,int begin,int end); /** * Computes the offset for accessing * a global element given the shape information * and the offset to be read. */ //#ifdef __CUDACC__ // __device__ //#endif // ND4J_EXPORT int tadOffset(shape::ShapeInformation *xInfo, int offset); /** * Returns a shape * forces the given length to be 2. * @param shape the shape to modify * @param dimension the dimension (row or column) * for the shape to be returned as * @return the new shape */ ND4J_EXPORT _CUDA_HD Nd4jLong* ensureVectorShape(Nd4jLong *shape); ND4J_EXPORT _CUDA_HD Nd4jLong* createScalarShapeInfo(); ND4J_EXPORT _CUDA_HD Nd4jLong* createScalarShapeInfo(Nd4jLong *ret); /** * Generate an int buffer * up to the given length * at the specified increment * */ template <typename T> ND4J_EXPORT _CUDA_HD T* range(int from, int to, int increment); /** * Range between from and two with an * increment of 1 */ template <typename T> ND4J_EXPORT _CUDA_HD T* range(int from, int to); /** * Keep the given indexes * in the data */ ND4J_EXPORT _CUDA_HD Nd4jLong *keep(volatile Nd4jLong *data, int* index, int indexLength, int dataLength); /** * Generate reverse copy of the data * @param data * @param length * @return */ template <typename T> ND4J_EXPORT _CUDA_HD T* reverseCopy(T *data, Nd4jLong length); template <typename T> ND4J_EXPORT _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong length); template <typename T> ND4J_EXPORT _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong *indexes, Nd4jLong length); template <typename T1, typename T2> ND4J_EXPORT _CUDA_H void convertT(T1 *from, T2 *to, Nd4jLong length); /** * * @param arr1 * @param arr1Length * @param arr2 * @param arr2Length * @return */ template <typename T> ND4J_EXPORT _CUDA_HD T* concat(T* arr1, Nd4jLong arr1Length, T* arr2, Nd4jLong arr2Length); /** * * @param numArrays * @param numTotalElements * @param arr * @param lengths * @return */ template <typename T> ND4J_EXPORT _CUDA_HD T* concat(int numArrays, int numTotalElements, Nd4jLong **arr, Nd4jLong *lengths); /** * Get the length per slice of the * given shape and the dimension * @param rank the rank of the shape * @param shape the shape of to get * the length per slice for * @param dimension the dimension to * get the length per slice for * @param dimensionLength the length of the dimension array * @return the length 
per slice of the given shape * along the given dimension */ ND4J_EXPORT _CUDA_HD Nd4jLong lengthPerSlice(int rank, Nd4jLong *shape, int *dimension, int dimensionLength); /** * calculates the offset for a tensor * @param index * @param arr * @param tensorShape * @return */ ND4J_EXPORT _CUDA_HD Nd4jLong sliceOffsetForTensor(int rank, int index, Nd4jLong *shape, Nd4jLong *tensorShape, int tensorShapeLength, int *dimension, int dimensionLength); /** * calculates the offset for a tensor * @param index * @param arr * @param tensorShape * @return */ ND4J_EXPORT _CUDA_HD Nd4jLong sliceOffsetForTensor(int index,int tensorLength,int lengthPerSlice2); /** * Computes the tensor along dimension * offset * @param index the index to get the offset for the tad for * @param rank the rank of the shapes and strides * @param info the shape information to use for tad * @param dimension the dimensions to use for computing the tensor along dimensions */ // ND4J_EXPORT _CUDA_HD int offset(int index, // int rank, // shape::ShapeInformation *info, // Nd4jLong *dimension, // int dimensionLength); /** * Computes the number * of tensors along * a given dimension */ ND4J_EXPORT _CUDA_HD Nd4jLong tensorsAlongDimension(int rank, volatile int length, volatile Nd4jLong *shape, int *dimension, int dimensionLength); /** * Computes the number * of tensors along * a given dimension */ ND4J_EXPORT _CUDA_HD Nd4jLong tensorsAlongDimension(Nd4jLong *shapeInfo, int *dimension, int dimensionLength); /** * Returns the tensor along dimension * for the given block index * @param blockSize * @param blockIdx * @param i * @return */ ND4J_EXPORT _CUDA_HD int tadForBlockIndex(int blockSize, int blockIdx, int i); /** * Computes the number of tads per block * */ ND4J_EXPORT _CUDA_HD int tadsPerBlock(int blockSize, int tads); // ND4J_EXPORT _CUDA_HD Nd4jLong *tadShapeInfo(int index, Nd4jLong *xShapeInfo, Nd4jLong *dimension, // int dimensionLength); /** * Returns a shape buffer * for the shape information metadata. */ ND4J_EXPORT _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info); ND4J_EXPORT _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info, Nd4jLong* ret); /** * Returns the number of elements per thread */ //#ifdef __CUDACC__ // __device__ //#endif // int numElementsPerThread(int N); /** * Returns the block starting index */ //#ifdef __CUDACC__ // __device__ //#endif // int blockStartingIndex(int N); /** * Returns the thread starting index */ //#ifdef __CUDACC__ // __device__ //#endif // int threadStartingIndex(int N, int stride, int offset); /** * Returns the thread ending index */ //#ifdef __CUDACC__ // __device__ //#endif // int threadEndingIndex(int N, int stride, int offset); /** * Returns indexing information * for the current kernel invocation */ //#ifdef __CUDACC__ // __device__ //#endif // CurrentIndexing *currentIndex(int N, int offset, int stride); /** Given an linear index, element wise stride * and the length of each tad * map a linear index to a tad * @param i the index to map * @param the element wise stride for the tads * @param numElementsPerTad the number of elements * per tad */ ND4J_EXPORT _CUDA_HD int tadIndex(int i, int elementWiseStride, int numElementsPerTad); /** * Map a tad to a * reduction index. 
* @param tadIndexForOriginal the original tad index for the * split up problem (eg: split is dimension 3 mapping to a 2,3 problem) * @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3) * @param tadsForOriginal the number of tads for the smaller problem (eg: 3) */ ND4J_EXPORT _CUDA_HD int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced, int tadsForOriginal); /** * Computes the number of tads * per reduce index for the * reduction tad. */ ND4J_EXPORT _CUDA_HD int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal); /** * Maps a linear index to a reduction index * @param i the linear index to map * @param elementWiseStride the element wise stride * for the multiple problem * @param tadNum the number of tads for the shrunken problem * @param originalTadNum the tad number for the reduced version of the problem */ ND4J_EXPORT _CUDA_HD int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad, int tadNum, int originalTadNum); /** * Returns the prod of the data * up to the given length */ ND4J_EXPORT _CUDA_HD int prod(Nd4jLong *data, int length); ND4J_EXPORT _CUDA_HD Nd4jLong prodLong(const Nd4jLong *data, int length); /** * Returns the rear most left over item not present in * the dimension array. This assumes that the dimension array is sorted. * * For example, given a dimension array of: * 0,2 * * and * * 12,4,2,1 in data * * You end up with 1 (data[3]) * since the first item won't match * the last item of the dimension array */ // ND4J_EXPORT _CUDA_HD int rearMostLeftOverItem(Nd4jLong *data,int length,Nd4jLong *dimension,int dimensionLength); /** * Get an offset for retrieval * from a data buffer * based on the given * shape stride and given indices * @param baseOffset the offset to start from * @param shape the shape of the array * @param stride the stride of the array * @param indices the indices to iterate over * @return the double at the specified index */ ND4J_EXPORT _CUDA_HD Nd4jLong getOffset(Nd4jLong baseOffset, const Nd4jLong *shape, const Nd4jLong *stride, const Nd4jLong *indices,int rank); ND4J_EXPORT _CUDA_HD Nd4jLong* createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank); ND4J_EXPORT _CUDA_HD Nd4jLong* createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank, Nd4jLong *buffer); /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ ND4J_EXPORT _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices); ND4J_EXPORT _CUDA_HD Nd4jLong *ind2sub(int rank, Nd4jLong *shape, Nd4jLong index); /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ ND4J_EXPORT _CUDA_HD void ind2sub(int rank,Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices,Nd4jLong *out); /** * Convert a linear index to * the equivalent nd index. * Infers the number of indices from the specified shape. 
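 *
 * For example (illustrative): with shape {2, 3} and index 4, the mapping used
 * here (the same one ind2subOrder applies for 'f' ordered arrays) yields the
 * coordinates {0, 2}.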
* * @param shape the shape of the dimensions * @param index the index to map * @return the mapped indexes along each dimension */ ND4J_EXPORT _CUDA_HD void ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong *out); /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ ND4J_EXPORT _CUDA_HD Nd4jLong* ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index); /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ ND4J_EXPORT _CUDA_HD Nd4jLong* ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices); /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ ND4J_EXPORT _CUDA_HD void ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *out); /** * Convert a linear index to * the equivalent nd index. * Infers the number of indices from the specified shape. * * @param shape the shape of the dimensions * @param index the index to map * @return the mapped indexes along each dimension */ ND4J_EXPORT _CUDA_HD void ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong *out); /** * Convert the given index (such as 1,1) * to a linear index * @param shape the shape of the indexes to convert * @param indices the index to convert * @return the linear index given the shape * and indices */ ND4J_EXPORT _CUDA_HD Nd4jLong sub2Ind(const int rank, const Nd4jLong *shape, const Nd4jLong *indices); /** * increment n-dimensional array by one iteration by changing coord appropriately * for example we have array with shape {2, 3}: * - if input coord = {0,1}, then output coord = {0,2} * - if input coord = {0,2}, then output coord = {1,0} * so the aim is to produce following subsequence of coord: {0,0}, {0,1}, {0,2}, {1,0}, {1,1}, {1,2} */ /* calculates an array buffer offset for given "index" using following formula: offset = coord_0*stride_0 + coord_1*stride_1 + ... + coord_{rank-1}*stride_{rank-1} * arrLen - array length */ ND4J_EXPORT _CUDA_HD uint getIndexOffset(uint index, const uint *shapeInfo, uint arrLen); ND4J_EXPORT _CUDA_HD Nd4jLong getIndexOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen); ND4J_EXPORT _CUDA_HD Nd4jLong getIndexOrderOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen, const char order); ND4J_EXPORT _CUDA_HD Nd4jLong indexOffset(Nd4jLong index, const Nd4jLong* lShapeInfo, const uint* uShapeInfo, Nd4jLong arrLen, const bool useUnsigned); /** * Compute the real linear indices for the given shape and stride */ ND4J_EXPORT _CUDA_HD Nd4jLong *computeIndices(int rank, Nd4jLong *shape, Nd4jLong *stride); /** * Compute the real linear indices for the * given shape buffer. 
Shape,stride and rank are derived * from the buffer */ ND4J_EXPORT _CUDA_HD Nd4jLong *computeIndices( Nd4jLong *shapeBuffer); /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ ND4J_EXPORT _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index, Nd4jLong numIndices,Nd4jLong *out); /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ ND4J_EXPORT _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index,Nd4jLong *out); ND4J_EXPORT _CUDA_HD void printShapeInfo(Nd4jLong *shapeInfo); ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const Nd4jLong *shapeInfo); ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const char *msg, const Nd4jLong *shapeInfo); ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const char *msg, int rank, const Nd4jLong *shape, const Nd4jLong *strides); ND4J_EXPORT _CUDA_HD void printIntArray(const Nd4jLong *arr, const int length); ND4J_EXPORT _CUDA_HD void printIntArray(const int *arr, const int length); ND4J_EXPORT _CUDA_HD void printArray(float *arr,int length); template<typename T> ND4J_EXPORT _CUDA_HD void printArray(T *arr,int length, const char *message); ND4J_EXPORT _CUDA_HD Nd4jLong* shapeBufferOfNpy(int rank, unsigned int *shape,bool fortranOrder); ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferOfNpy(cnpy::NpyArray arr); // ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferOfNpyBuffer(char *buffer); // this function checks the consistence of dimensions with array rank (negative dimensions, too large dimensions, too big number of dimensions) // also sort input array of dimensions, this operation is also necessary for creating TAD object ND4J_EXPORT _CUDA_H void checkDimensions(const int rank, std::vector<int>& dimensions); // function calculates linear index of array min, min is sub-array of max, index to be returned is min-array's index and corresponds to maxIdx of max array // dimsToExclude - should be sorted in increasing order ND4J_EXPORT _CUDA_HD Nd4jLong subArrayIndex(const Nd4jLong maxIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr, const int dimsLen = -1); // function calculates absolute offset of min array, min is sub-array of max, offset to be returned corresponds to maxIdx of max array // dimsToExclude - should be sorted in increasing order ND4J_EXPORT _CUDA_HD Nd4jLong subArrayOffset(const Nd4jLong maxIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr, const int dimsLen = -1); // max array is outer for min array, min array is sub-array of max array // function calculates the coordinates of min array (and saves them into minIdxs) given coordinates of max array (already stored in maxIdxs) // dimsToExclude - should be sorted in increasing order // dimsLen - length of dimsToExclude, if not set (= -1), then it is calculated as maxRank - minRank ND4J_EXPORT _CUDA_HD void maxIndToMinInd(Nd4jLong* maxIdxs, Nd4jLong* minIdxs, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr, const int dimsLen = -1); // calculate indexes of max-array, these output indexes correspond to one minIdx index of min-array which is sub-array 
of max-array // dimsToExclude - should be sorted in increasing order ND4J_EXPORT _CUDA_HD int outerArrayIndexes(Nd4jLong* maxIdxs, const Nd4jLong minIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr); // calculate offsets of max-array, these output offsets correspond to one minIdx index of min-array which is sub-array of max-array // dimsToExclude - should be sorted in increasing order ND4J_EXPORT _CUDA_HD int outerArrayOffsets(Nd4jLong* maxOffsets, const Nd4jLong minIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr); // calculates offsets for numOfSubArrs sub-arrays, shape in this context means dominions excluded from outer array // rank is equal to size of shape ND4J_EXPORT void calcSubArrOffsets(const Nd4jLong numOfSubArrs, const int rank, const Nd4jLong* shape, const Nd4jLong* strides, Nd4jLong* subArrOffsets); ND4J_EXPORT _CUDA_HD void shapeOldScalar(nd4j::DataType dtype, Nd4jLong* const buffer, const char order); // calculate element-wise stride // if array is scalar or unit length vector then ews = 1 // if array is common vector then ews = stride of non-unity dimension // if strides are normal set ews = 1, otherwise ews = 0 ND4J_EXPORT _CUDA_HD void calcEws(Nd4jLong* shapeInfo, Nd4jLong len); //END HEADERS //BEGIN IMPLEMENTATIONS #ifdef __CUDACC__ template <typename T> __device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size, UnifiedSharedMemory *manager) { // if we go for 3 dimensions coord space or below - just use shared memory for that if (size <= MAX_COORD * 4) { Nd4jLong *ptr = new Nd4jLong[size / 4];//manager->getSharedCoordBuffer() + (threadIdx.x * MAX_COORD); return ptr; } else { // otherwise go to preallocated global memory :( int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid * size > PREALLOC_SIZE - size) { return (Nd4jLong *) malloc(size); } else { Nd4jLong *ret = buffer; ret += (tid * size); return ret; } } } #endif #ifdef __CUDACC__ /** * BEWARE: THIS METHOD DOES NOT CHECKS ALLOCATION BOUNDARIES */ __device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size) { Nd4jLong *ret = buffer; ret += (threadIdx.x * size); return ret; } #endif /** * Length of a tad given * the shape information */ INLINEDEF _CUDA_HD int tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) { if(dimensionLength == 1) { return shape::shapeOf(shapeInfo)[dimension[0]]; } else { int ret = 1; for(int i = 0; i < shape::rank(shapeInfo); i++) { for(int j = 0; j < dimensionLength; j++) { if(i == dimension[j]) ret *= shape::shapeOf(shapeInfo)[dimension[j]]; } } return ret; } } /** * Tad element wise stride: * given the inner most dimension (the sorted dimension of the last) * the element wise stride of the tad (disregarding order) is the * last dimension's stride. * * For a given singular dimension this will just be the only entry. * For example, given the following c order shape/stride: * 2,2,3,2 * 12,6,2,1 * * The tad element wise stride for 3 will be 1. * For zero it wil be 12 * * For 2,3 it's 1 * * Note here that the multi dimensional 2,3 case * is equivalent to the singular 3 case. * * * Note that this is for the dimension that ultimately * ends up removed. * * Again: this may not preserve ordering of the tad * but maybe used for reductions. 
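 *
 * A minimal usage sketch for the 2,2,3,2 example above (shapeInfo is assumed
 * to be the corresponding c order shape buffer):
 *
 *   int dims[] = {3};
 *   int ews = shape::tadElementWiseStride(shapeInfo, dims, 1);   // expected: 1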
*/ INLINEDEF _CUDA_HD int tadElementWiseStride(Nd4jLong *shapeInfo, int *dimension,int dimensionLength) { return reductionIndexElementWiseStride(shapeInfo,dimension,dimensionLength); } INLINEDEF _CUDA_HD bool shapeEquals(const int shape1Rank, const Nd4jLong *shape1, const int shape2Rank, const Nd4jLong *shape2) { if(shape1Rank != shape2Rank) return false; //rank not equals for(int i = 0; i < shape1Rank; i++) { if(shape1[i] != shape2[i]) return false; } return true; } INLINEDEF _CUDA_HD bool shapeEquals(const Nd4jLong *shapeInfo1, const Nd4jLong *shapeInfo2) { return shape::shapeEquals(shape::rank(shapeInfo1), shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo1)), shape::rank(shapeInfo2), shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo2))); } INLINEDEF _CUDA_HD bool strideEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2) { if(shape1Rank != shape2Rank) return false; //rank not equals for(int i = 0; i < shape1Rank; i++) { if(shape1[i] != shape2[i]) return false; } return true; } INLINEDEF _CUDA_HD bool strideEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2) { return shape::strideEquals(shape::rank(shapeInfo1),shape::stride(shapeInfo1),shape::rank(shapeInfo2),shape::stride(shapeInfo2)); } INLINEDEF _CUDA_HD bool strideEquals(Nd4jLong *stride1,int rank1 , Nd4jLong *stride2, int rank2) { if(rank1 != rank2) return false; for(int i = 0; i < rank1; i++) { if(stride1[i] != stride2[i]) return false; } return true; } INLINEDEF _CUDA_HD Nd4jLong *computeResultShape(Nd4jLong *originalShapeBuffer, int* dimension,int dimensionLength) { Nd4jLong *retShape; int retShapeLength; if(dimensionLength == 1 && dimension[0] == 2147483647) { retShape = new Nd4jLong[2]; retShape[0] = 1; retShape[1] = 1; retShapeLength = 2; } else { retShape = shape::removeIndex<Nd4jLong, int>(shape::shapeOf(originalShapeBuffer), dimension, shape::shapeInfoLength(shape::rank(originalShapeBuffer)), dimensionLength); retShapeLength = shape::rank(originalShapeBuffer) - dimensionLength; } //ensure vector is proper shape if (retShapeLength == 1) { if (dimension[0] == 0) { auto newRetShape = new Nd4jLong[2]{1, retShape[0]}; delete[] retShape; retShape = newRetShape; retShapeLength = 2; } else { auto newRetShape = new Nd4jLong[2]{retShape[0], 1}; delete[] retShape; retShape = newRetShape; retShapeLength = 2; } } else if (retShapeLength == 0) { auto newRetShape = new Nd4jLong[2]{1, 1}; delete[] retShape; retShape = newRetShape; retShapeLength = 2; } auto ret = shape::shapeBuffer(retShapeLength, nd4j::ArrayOptions::dataType(originalShapeBuffer), retShape); delete[] retShape; return ret; } INLINEDEF _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride, Nd4jLong *buffer) { Nd4jLong *theShape = shape::shapeOf(shapeInfo); Nd4jLong *theStride = shape::stride(shapeInfo); int rank = dimensionLength == 1 ? 
2 : dimensionLength; Nd4jLong *ret = buffer; //set the rank ret[0] = rank; Nd4jLong *retShape = shape::shapeOf(ret); Nd4jLong *retStride = shape::stride(ret); int len = rank; if(dimensionLength == 1) { if(shape::isMatrix(theShape,shape::rank(shapeInfo))) { if(dimension[0] == 0) { Nd4jLong newStride[2] = {theStride[dimension[0]],1}; Nd4jLong newShape[2] = {theShape[dimension[0]],1}; retShape[0] = newShape[0]; retShape[1] = newShape[1]; retStride[0] = newStride[0]; retStride[1] = newStride[1]; } else { Nd4jLong newStride[2] = {theStride[dimension[0]],1}; Nd4jLong newShape[2] = {theShape[dimension[0]],1}; retShape[0] = newShape[0]; retShape[1] = newShape[1]; retStride[0] = newStride[0]; retStride[1] = newStride[1]; } } else { Nd4jLong newStride[2] = {1,theStride[dimension[0]]}; Nd4jLong newShape[2] = {1,theShape[dimension[0]]}; retShape[0] = newShape[0]; retShape[1] = newShape[1]; retStride[0] = newStride[0]; retStride[1] = newStride[1]; } } else { Nd4jLong *newIndexes = dimension; if(reverseCopyStride) shape::reverseCopyTo(theStride, retStride, newIndexes, len); else shape::copyTo(len, theStride, retStride, newIndexes); shape::copyTo(len, theShape, retShape, newIndexes); } ret[shape::shapeInfoLength(rank) - 1] = shape::order(shapeInfo); return ret; } INLINEDEF _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride) { int rank = dimensionLength == 1 ? 2 : dimensionLength; traceNew(4); Nd4jLong *ret = new Nd4jLong[shape::shapeInfoLength(rank)]; return shapeInfoOnlyShapeAndStride(shapeInfo, dimension, dimensionLength, reverseCopyStride, ret); } INLINEDEF _CUDA_HD Nd4jLong * createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank) { traceNew(5); Nd4jLong *ret = new Nd4jLong[shape::shapeInfoLength(rank)]; return createShapeInfo(shape, stride, rank, ret); } INLINEDEF _CUDA_HD Nd4jLong * createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank, Nd4jLong *buffer) { buffer[0] = rank; Nd4jLong *retShape = shape::shapeOf(buffer); Nd4jLong *retStride = shape::stride(buffer); for(int i = 0;i < rank; i++) { retShape[i] = shape[i]; retStride[i] = stride[i]; } return buffer; } /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, int startNum) { if (isVector(shape, rank)) { traceNew(5); Nd4jLong *ret = new Nd4jLong[2]; for (int i = 0; i < 2; i++) ret[i] = 1; return ret; } int dimensions = rank; traceNew(6); Nd4jLong *stride = new Nd4jLong[dimensions]; int st = startNum; for (int j = 0; j < rank; j++) { stride[j] = st; st *= shape[j]; } return stride; } INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, int startNum, Nd4jLong *ret) { if (isVector(shape, rank)) { for (int i = 0; i < 2; i++) ret[i] = 1; return ret; } int dimensions = rank; int st = startNum; for (int j = 0; j < rank; j++) { ret[j] = st; st *= shape[j]; } return ret; } /** * Computes the standard packed array strides for a given shape. 
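 *
 * For example (illustrative): a c order shape {2, 3, 4} with startNum = 1
 * gives the strides {12, 4, 1}; the last axis always receives startNum.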
* * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ INLINEDEF _CUDA_HD Nd4jLong * calcStrides(Nd4jLong *shape, int rank, int startNum) { traceNew(7); Nd4jLong *stride = new Nd4jLong[rank]; if (rank == 1) { stride[0] = 1; return stride; } // if (shape::isVector(shape, rank)) { // for (int i = 0; i < 2; i++) // stride[i] = 1; // return stride; // } int st = startNum; for (int j = rank - 1; j >= 0; j--) { stride[j] = st; st *= shape[j]; } return stride; } INLINEDEF _CUDA_HD Nd4jLong * calcStrides(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret) { if (rank == 1) { ret[0] = 1; return ret; } // if (shape::isVector(shape, rank)) { // for (int i = 0; i < 2; i++) // ret[i] = 1; // return ret; // } int st = startNum; for (int j = rank - 1; j >= 0; j--) { ret[j] = st; st *= shape[j]; } return ret; } /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank) { return calcStridesFortran(shape, rank, 1); } INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, Nd4jLong* ret) { return calcStridesFortran(shape, rank, 1, ret); } /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ INLINEDEF _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank) { return calcStrides(shape, rank, 1); } INLINEDEF _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, Nd4jLong* ret) { return calcStrides(shape, rank, 1, ret); } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_HD void updateStrides(Nd4jLong *shapeInfo, const char order) { int rank = shapeInfo[0]; int doubleRank = 2*rank; if (rank > 0) { if (order == 'c') { shapeInfo[doubleRank] = 1; // set unity as last stride for c order for (int j = 1; j < rank; ++j) { shapeInfo[doubleRank - j] = shapeInfo[doubleRank - j + 1] * shapeInfo[rank + 1 - j]; } } else { shapeInfo[rank + 1] = 1; // set unity as first stride for f order for (int j = rank + 1; j < doubleRank; ++j) { shapeInfo[j + 1] = shapeInfo[j] * shapeInfo[j - rank]; } } } // set last 2 elements in shapeInfo shapeInfo[doubleRank + 2] = 1; shapeInfo[doubleRank + 3] = (int)order; } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_HD void updateStrides(const int rank, const Nd4jLong *shapeOnly, Nd4jLong *stridesOnly, const char order) { if (rank > 0) { if (order == 'c') { stridesOnly[rank - 1] = 1; // set unity as last stride for c order for (int j = 1; j < rank; ++j) stridesOnly[rank - 1 - j] = stridesOnly[rank - j] * shapeOnly[rank - j]; } else { stridesOnly[0] = 1; // set unity as first stride for f order for (int j = 1; j < rank; ++j) { stridesOnly[j] = stridesOnly[j - 1] * shapeOnly[j - 1]; } } } } // check whether input dimensions are permuted, not permuted dimensions order have to be 0,....,rank-1 template <typename T> INLINEDEF _CUDA_HD bool isDimPermuted(const T* dimensions, const Nd4jLong dimSize ) { for(int i=0; i<dimSize-1; ++i) if(dimensions[i] > dimensions[i+1]) return true; return false; } /** * @param toCopy the shape to copy * @return a copy of the original struct */ INLINEDEF _CUDA_HD ShapeInformation *shapeCopy( 
ShapeInformation *toCopy) { auto copy = new ShapeInformation; traceNew(8); copy->shape = new Nd4jLong[toCopy->rank]; memcpy(copy->shape, toCopy->shape, toCopy->rank * sizeof(Nd4jLong)); traceNew(9); copy->stride = new Nd4jLong[toCopy->rank]; for (int i = 0; i < toCopy->rank; i++) { copy->stride[i] = toCopy->stride[i]; } copy->order = toCopy->order; copy->rank = toCopy->rank; copy->offset = toCopy->offset; copy->elementWiseStride = toCopy->elementWiseStride; return copy; } INLINEDEF _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder) { if (rank == 0) return 1; if(shape::isVector(shape,rank)) { return stride[rank - 1]; } else { int oldnd; Nd4jLong *oldDims = shape::copyOf(rank, shape); Nd4jLong *oldStrides = shape::copyOf(rank, stride); int np, op, last_stride; int oldStart, oldStop, ok, newStart, newStop, nk; traceNew(10); auto newStrides = new Nd4jLong[rank]; oldnd = 0; //set the shape to be 1 x length int newShapeRank = 2; auto newShape = new Nd4jLong[newShapeRank]; newShape[0] = 1; newShape[1] = shape::prodLong(shape, rank); /* * Remove axes with dimension 1 from the old array. They have no effect * but would need special cases since their strides do not matter. */ for (oldStart = 0; oldStart < rank; oldStart++) { if (shape[oldStart] != 1) { oldDims[oldnd] = shape[oldStart]; oldStrides[oldnd] = stride[oldStart]; oldnd++; } } np = 1; for (newStart = 0; newStart < newShapeRank; newStart++) { np *= newShape[newStart]; } op = 1; for (oldStart = 0; oldStart < oldnd; oldStart++) { op *= oldDims[oldStart]; } if (np != op) { /* different total sizes; no hope */ delete[] newStrides; delete[] newShape; delete[] oldStrides; delete[] oldDims; return 0; } if (np == 0) { /* the current code does not handle 0-sized arrays, so give up */ delete[] newStrides; delete[] newShape; delete[] oldStrides; delete[] oldDims; return 0; } /* oldStart to oldStop and newStart to newStop give the axis ranges currently worked with */ oldStart = 0; oldStop = 1; newStart = 0; newStop = 1; while (newStart < newShapeRank && oldStart < oldnd) { np = newShape[newStart]; op = oldDims[oldStart]; while (np != op) { if (np < op) { /* Misses trailing 1s, these are handled later */ np *= newShape[newStop++]; } else { op *= oldDims[oldStop++]; } } /* Check whether the original axes can be combined */ for (ok = oldStart; ok < oldStop - 1; ok++) { if (isFOrder) { if (oldStrides[ok + 1] != oldDims[ok] * oldStrides[ok]) { /* not contiguous enough */ delete[] newStrides; delete[] newShape; delete[] oldStrides; delete[] oldDims; return 0; } } else { /* C order */ if (oldStrides[ok] != oldDims[ok + 1] * oldStrides[ok + 1]) { /* not contiguous enough */ delete[] newStrides; delete[] newShape; delete[] oldStrides; delete[] oldDims; return 0; } } } /* Calculate new strides for all axes currently worked with */ if (isFOrder) { newStrides[newStart] = oldStrides[oldStart]; for (nk = newStart + 1; nk < newStop; nk++) { newStrides[nk] = newStrides[nk - 1] * newShape[nk - 1]; } } else { /* C order */ newStrides[newStop - 1] = oldStrides[oldStop - 1]; for (nk = newStop - 1; nk > newStart; nk--) { newStrides[nk - 1] = newStrides[nk] * newShape[nk]; } } newStart = newStop++; oldStart = oldStop++; } /* * Set strides corresponding to trailing 1s of the new shape. 
*/ if (newStart >= 1) { last_stride = newStrides[newStart - 1]; } else { last_stride = stride[rank - 1]; } if (isFOrder) { if (newStart >= 1) last_stride *= newShape[newStart - 1]; } for (nk = newStart; nk < newShapeRank; nk++) { newStrides[nk] = last_stride; } //returns the last element of the new stride array int ret = last_stride; delete[] newStrides; delete[] newShape; delete[] oldStrides; delete[] oldDims; return ret; } } INLINEDEF _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder, Nd4jLong *dimension, int dimensionLength) { if(dimensionLength == 1) { return stride[dimension[0]]; } return 0; } /** * Get the shape info buffer * for the given rank and shape. */ INLINEDEF _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape) { Nd4jLong *stride = shape::calcStrides(shape, rank); traceNew(11); auto shapeInfo = new shape::ShapeInformation(); shapeInfo->shape = shape; shapeInfo->stride = stride; shapeInfo->offset = 0; shapeInfo->rank = rank; int elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0); shapeInfo->order = 'c'; shapeInfo->elementWiseStride = elementWiseStride; auto shapeInfoBuffer = shape::toShapeBuffer(shapeInfo); delete[] stride; delete shapeInfo; nd4j::ArrayOptions::setDataType(shapeInfoBuffer, dtype); return shapeInfoBuffer; } /** * This is special method, it returns ONLY 2D shapebuffer. * * This method is used only for SoftMax */ INLINEDEF _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *buffer) { Nd4jLong stride[MAX_RANK]; shape::calcStrides(shape,rank, stride); shape::ShapeInformation shapeInfo; shapeInfo.shape = shape; shapeInfo.stride = stride; shapeInfo.offset = 0; shapeInfo.rank = rank; auto elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0); shapeInfo.order = 'c'; shapeInfo.elementWiseStride = elementWiseStride; shape::toShapeBuffer(&shapeInfo, buffer); nd4j::ArrayOptions::setDataType(buffer, dtype); return buffer; } /** * Get the shape info buffer * for the given rank and shape. 
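 *
 * A minimal usage sketch (illustrative; the exact DataType enumerator is an
 * assumption, and the caller owns the returned buffer):
 *
 *   Nd4jLong myShape[] = {2, 3};
 *   auto info = shape::shapeBufferFortran(2, nd4j::DataType::FLOAT32, myShape);
 *   // rank 2, shape {2,3}, f order strides {1,2}, order 'f'
 *   delete[] info;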
*/ INLINEDEF _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape) { auto stride = shape::calcStridesFortran(shape,rank); traceNew(12); auto shapeInfo = new shape::ShapeInformation(); shapeInfo->shape = shape; shapeInfo->stride = stride; shapeInfo->offset = 0; shapeInfo->rank = rank; int elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0); shapeInfo->order = 'f'; shapeInfo->elementWiseStride = elementWiseStride; auto shapeInfoBuffer = shape::toShapeBuffer(shapeInfo); delete[] stride; delete shapeInfo; nd4j::ArrayOptions::setDataType(shapeInfoBuffer, dtype); return shapeInfoBuffer; } INLINEDEF _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *output) { Nd4jLong stride[MAX_RANK]; shape::calcStridesFortran(shape,rank, stride); shape::ShapeInformation shapeInfo; shapeInfo.shape = shape; shapeInfo.stride = stride; shapeInfo.offset = 0; shapeInfo.rank = rank; auto elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0); shapeInfo.order = 'f'; shapeInfo.elementWiseStride = elementWiseStride; shape::toShapeBuffer(&shapeInfo, output); nd4j::ArrayOptions::setDataType(output, dtype); return output; } /** * Compute the real linear indices for the given shape and stride */ INLINEDEF _CUDA_HD Nd4jLong *computeIndices(int rank, Nd4jLong *shape, Nd4jLong *stride) { Nd4jLong length = shape::prodLong(shape,rank); traceNew(13); Nd4jLong *ret = new Nd4jLong[length]; for(int i = 0; i < length; i++) { Nd4jLong *idx = shape::ind2sub(rank, shape, i); ret[i] = shape::getOffset(0, shape, stride, idx, rank); delete[] idx; } return ret; } /** * Compute the real linear indices for the given shape and stride */ INLINEDEF _CUDA_HD Nd4jLong *computeIndices(Nd4jLong *shapeBuffer) { return computeIndices(shape::rank(shapeBuffer),shape::shapeOf(shapeBuffer),shape::stride(shapeBuffer)); } /** * Convert the given index (such as 1,1) * to a linear index * @param shape the shape of the indexes to convert * @param indices the index to convert * @return the linear index given the shape * and indices */ INLINEDEF _CUDA_HD Nd4jLong sub2Ind(const int rank, const Nd4jLong *shape, const Nd4jLong *indices) { Nd4jLong index = indices[rank-1]; Nd4jLong shift = 1; for(int i = rank-2; i >= 0; --i) { shift *= shape[i+1]; index += shift * indices[i]; } return index; } template <typename T> INLINEDEF _CUDA_HD void fill(T* buffer, T value, Nd4jLong length) { PRAGMA_OMP_SIMD for (int e = 0; e < length; e++) buffer[e] = value; } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices) { auto ret = new Nd4jLong[rank]; ind2sub(rank, shape, index, numIndices, ret); return ret; } /** * Convert a linear index to * the equivalent nd index. * Infers the number of indices from the specified shape. 
* * @param shape the shape of the dimensions * @param index the index to map * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape, Nd4jLong index) { return ind2sub(rank,shape, index, shape::prodLong(shape,rank)); } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD void ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *ret) { int denom = numIndices; for(int i = rank - 1; i >= 0; i--) { denom /= shape[i]; ret[i] = index / denom; index %= denom; } } /** * Convert a linear index to * the equivalent nd index. * Infers the number of indices from the specified shape. * * @param shape the shape of the dimensions * @param index the index to map * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD void ind2sub(int rank,Nd4jLong *shape, Nd4jLong index, Nd4jLong *out) { ind2sub(rank,shape, index, shape::prodLong(shape,rank),out); } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD Nd4jLong * ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices) { auto ret = new Nd4jLong[rank]; ind2subC(rank, shape, index, numIndices, ret); return ret; } /** * Convert a linear index to * the equivalent nd index. * Infers the number of indices from the specified shape. * * @param shape the shape of the dimensions * @param index the index to map * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD Nd4jLong *ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index) { return ind2subC(rank,shape, index, shape::prodLong(shape,rank)); } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param arrLen the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD void ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong arrLen, Nd4jLong *ret) { for(int i = 0; i < rank; i++) { arrLen /= shape[i]; if(arrLen > 0) { ret[i] = index / arrLen; index %= arrLen; } else ret[i] = 0; } } /** * Convert a linear index to * the equivalent nd index. * Infers the number of indices from the specified shape. 
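 *
 * For example (illustrative): with shape {2, 3} and index 4, the c order
 * mapping yields the coordinates {1, 1}.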
* * @param shape the shape of the dimensions * @param index the index to map * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD void ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong *out) { ind2subC(rank,shape, index,shape::prodLong(shape,rank),out); } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_HD Nd4jLong getIndexOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen) { const Nd4jLong ews = shapeInfo[shapeInfo[0] + shapeInfo[0] + 2]; if(ews > 0 && order(shapeInfo) == 'c') if (ews == 1) return index; else return ews * index; Nd4jLong offset = 0; for(int i = 1; i <= shapeInfo[0]; ++i) { arrLen /= shapeInfo[i]; if(arrLen > 0 && shapeInfo[i] > 1) { offset += (index / arrLen) * shapeInfo[i + shapeInfo[0]]; index %= arrLen; } } return offset; } INLINEDEF _CUDA_HD uint getIndexOffset(uint index, const uint *shapeInfo, uint arrLen) { const uint rank = shapeInfo[0]; const uint ews = shapeInfo[rank + rank + 2]; if(ews > 0 && shapeInfo[rank + rank + 3] == 99) if (ews == 1) return index; else return ews * index; uint offset = 0; for(uint i = 1; i <= rank; ++i) { arrLen /= shapeInfo[i]; if(arrLen > 0 && shapeInfo[i] > 1) { offset += (index / arrLen) * shapeInfo[i + rank]; index %= arrLen; } } return offset; } INLINEDEF _CUDA_HD Nd4jLong indexOffset(Nd4jLong index, const Nd4jLong* lShapeInfo, const uint* uShapeInfo, Nd4jLong arrLen, const bool useUnsigned) { if(useUnsigned) return getIndexOffset(static_cast<uint>(index), uShapeInfo, static_cast<uint>(arrLen)); return getIndexOffset(index, lShapeInfo, arrLen); } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_HD Nd4jLong getIndexOrderOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen, const char order) { Nd4jLong offset = 0; if(order == 'c') { for(int i = 1; i <= *shapeInfo; ++i) { arrLen /= shapeInfo[i]; if(arrLen > 0 && shapeInfo[i] > 1) { offset += (index / arrLen) * shapeInfo[i + *shapeInfo]; index %= arrLen; } } } else { for(int i = *shapeInfo; i >= 1 ; --i) { arrLen /= shapeInfo[i]; if(arrLen > 0 && shapeInfo[i] > 1) { offset += (index / arrLen) * shapeInfo[i + *shapeInfo]; index %= arrLen; } } } return offset; } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *out) { if(shape::order(shapeInfo) == 'f') { shape::ind2sub( shape::rank(shapeInfo), shape::shapeOf(shapeInfo), index, numIndices, out); } else { shape::ind2subC( shape::rank(shapeInfo), shape::shapeOf(shapeInfo), index, numIndices, out); } } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index, Nd4jLong *out) { ind2subOrder(shapeInfo,index,shape::length(shapeInfo),out); } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ /** * * @param length * 
@param shape * @param rearrange * @return */ INLINEDEF _CUDA_HD Nd4jLong *doPermuteSwap(int length, Nd4jLong *shape, int *rearrange) { traceNew(16); Nd4jLong *ret = new Nd4jLong[length]; for (int i = 0; i < length; i++) { ret[i] = shape[rearrange[i]]; } return ret; } /** * * @param length * @param shape * @param rearrange * @return */ INLINEDEF _CUDA_HD void doPermuteSwap(int length, Nd4jLong **shape, int *rearrange) { if(length == 1) { return; } else { Nd4jLong *shapeDeref = *shape; if(shape::prodLong(shapeDeref,length) < 2) { return; } } bool inOrder = true; for(int i = 0; i < length - 1; i++) { inOrder = inOrder && rearrange[i] + 1 == rearrange[i + 1]; } //all in order, nothing to do if(inOrder) return; Nd4jLong *shapeDeref = *shape; //we know they are just reversed, dimension length of 2 if(length == 2) { auto shapeFirst = shapeDeref[0]; auto shapeSecond = shapeDeref[1]; shapeDeref[0] = shapeSecond; shapeDeref[1] = shapeFirst; return; } else if(length == 1) { //no permute return; } auto temp = new Nd4jLong[length]; memcpy(temp,shapeDeref,sizeof(Nd4jLong) * length); for (int i = 0; i < length; i++) { shapeDeref[i] = temp[rearrange[i]]; } delete[] temp; } INLINEDEF _CUDA_HD void permuteShapeBufferInPlace(Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *out) { if(shapeBuffer != out) memcpy(out,shapeBuffer,sizeof(Nd4jLong) * shape::shapeInfoLength(shape::rank(shapeBuffer))); doPermuteShapeBuffer(shape::rank(shapeBuffer), shapeBuffer, rearrange, out); } INLINEDEF _CUDA_HD Nd4jLong *permuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange) { auto len = shape::shapeInfoLength(shape::rank(shapeBuffer)); Nd4jLong *copy = shape::copyOf(len, shapeBuffer); doPermuteShapeBuffer(copy,rearrange); return copy; } INLINEDEF _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeInfo, const Nd4jLong *rearrange) { const int rank = shape::rank(shapeInfo); //check whether shape is like {1} or {1,1} or {1,1,1,1,...} - in this case we don't need permute if(prodLong(shape::shapeOf(shapeInfo), rank) < 2) return; // check whether rearrange is like {0,1,2,3,...} - in this case we don't need permute as well bool isPermutNecessary = false; for(int i = 0; i < rank; ++i) if(rearrange[i] != i) { isPermutNecessary = true; break; } if(!isPermutNecessary) return; // check whether rearrange contains correct indexes for(int i = 0; i < rank; ++i) if(rearrange[i] >= rank || rearrange[i] < 0) { printf("shape::doPermuteShapeInfo function failed: rearrange indexes are incorrect !\n"); return; } // if everything is ok then perform permute auto temp = new Nd4jLong[shape::shapeInfoLength(rank)]; memcpy(temp, shapeInfo, sizeof(Nd4jLong) * shape::shapeInfoLength(rank)); for (int i = 0; i < rank; ++i) { shapeInfo[i + 1] = temp[rearrange[i] + 1]; shapeInfo[i + 1 + rank] = temp[rearrange[i] + 1 + rank]; } shapeInfo[2 * rank + 2] = 0; // ews shapeInfo[2 * rank + 3] = shape::getOrder(rank, shape::shapeOf(shapeInfo),shape::stride(shapeInfo),1); // order delete[] temp; } INLINEDEF _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeInfo, const int* rearrange) { const int rank = shape::rank(shapeInfo); //check whether shape is like {1} or {1,1} or {1,1,1,1,...} - in this case we don't need permute if(prodLong(shape::shapeOf(shapeInfo), rank) < 2) return; // check whether rearrange is like {0,1,2,3,...} - in this case we don't need permute as well bool isPermutNecessary = false; for(int i = 0; i < rank; ++i) if(rearrange[i] != i) { isPermutNecessary = true; break; } if(!isPermutNecessary) return; // check whether rearrange contains correct indexes 
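 // (every rearrange[i] must lie in [0, rank); otherwise the permute below is refused)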
for(int i = 0; i < rank; ++i) if(rearrange[i] >= rank || rearrange[i] < 0) { printf("shape::doPermuteShapeInfo function failed: rearrange indexes are incorrect !\n"); return; } // if everything is ok then perform permute auto temp = new Nd4jLong[shape::shapeInfoLength(rank)]; memcpy(temp, shapeInfo, sizeof(Nd4jLong) * shape::shapeInfoLength(rank)); for (int i = 0; i < rank; ++i) { shapeInfo[i + 1] = temp[rearrange[i] + 1]; shapeInfo[i + 1 + rank] = temp[rearrange[i] + 1 + rank]; } shapeInfo[shapeInfoLength(rank) - 2] = 0; shapeInfo[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank, shape::shapeOf(shapeInfo),shape::stride(shapeInfo), 1); delete[] temp; } INLINEDEF _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer,int *rearrange) { //no swapping needs to happen if(shape::isScalar(shapeBuffer)) { return; } Nd4jLong *shapeRef = shapeBuffer; //rank of the rearrange array == rank of shape buffer int rearrageRank = shape::rank(shapeRef); Nd4jLong *shape = shape::shapeOf(shapeRef); Nd4jLong *stride = shape::stride(shapeRef); shape::doPermuteSwap(rearrageRank,&shape,rearrange); shape::doPermuteSwap(rearrageRank,&stride,rearrange); shapeRef[shapeInfoLength(rearrageRank) - 2] = 0; shapeRef[shape::shapeInfoLength(rearrageRank) - 1] = shape::getOrder(rearrageRank,shape,stride,1); // doPermuteShapeInfo(shapeBuffer, rearrange); // possible fix of integer overflow issue when strides are too large } /* INLINEDEF _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer) { auto shapeRef = shapeBuffer; //rank of the rearrange array == rank of shape buffer int rearrageRank = shape::rank(shapeRef); auto shape = shape::shapeOf(shapeRef); auto stride = shape::stride(shapeRef); shape::copyOf(rearrageRank,rearrange, tmpBuffer); shape::doPermuteSwap(rearrageRank,&shape, tmpBuffer); shape::copyOf(rearrageRank,rearrange, tmpBuffer); shape::doPermuteSwap(rearrageRank,&stride,tmpBuffer); shapeRef[shapeInfoLength(rearrageRank) - 2] = 0; shapeRef[shape::shapeInfoLength(rearrageRank) - 1] = shape::getOrder(rearrageRank,shape,stride,1); } */ INLINEDEF _CUDA_HD void doPermuteShapeBuffer(int rank,Nd4jLong *shapeBuffer, int *rearrange) { Nd4jLong *shapeRef = shapeBuffer; //rank of the rearrange array == rank of shape buffer int rearrageRank = rank; Nd4jLong *shape = shape::shapeOf(shapeRef); Nd4jLong *stride = shape::stride(shapeRef); auto rearrangeCopy1 = shape::copyOf(rearrageRank, rearrange); shape::doPermuteSwap(rearrageRank,&shape,rearrangeCopy1); delete[] rearrangeCopy1; auto rearrangeCopy2 = shape::copyOf(rearrageRank,rearrange); shape::doPermuteSwap(rearrageRank, &stride, rearrangeCopy2); shapeBuffer[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank,shape,stride,1); shapeBuffer[shape::shapeInfoLength(rank) - 2] = 0; delete[] rearrangeCopy2; } INLINEDEF _CUDA_HD void doPermuteShapeBuffer(int rank, Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer) { Nd4jLong *shapeRef = shapeBuffer; //rank of the rearrange array == rank of shape buffer int rearrageRank = rank; auto shape = shape::shapeOf(shapeRef); auto stride = shape::stride(shapeRef); if(shapeBuffer != tmpBuffer) shape::copyOf(rearrageRank,shapeBuffer, tmpBuffer); shape::doPermuteSwap(rearrageRank,&shape,rearrange); shape::doPermuteSwap(rearrageRank,&stride,rearrange); shapeRef[shapeInfoLength(rank) - 2] = 0; shapeRef[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank,shape,stride,1); } INLINEDEF _CUDA_HD Nd4jLong *createPermuteIndexes(int originalRank, int *dimension,int dimensionLength) { int 
delta = originalRank - dimensionLength; traceNew(17); Nd4jLong *ret = new Nd4jLong[originalRank]; for(int i = 0; i < delta; i++) { ret[i] = i + dimensionLength; } for(int i = delta; i < originalRank; i++) { ret[i] = i - delta; } return ret; } /** * Get the ordering for the device * @param length * @param shape * @param stride * @param elementStride * @return */ INLINEDEF _CUDA_HD char getOrder(int length, Nd4jLong *shape, Nd4jLong *stride, int elementStride) { int sd = -1; int dim = -1; int i = -1; int cContiguous = 1; int isFortran = 1; sd = 1; for (i = length - 1; i >= 0; --i) { dim = shape[i]; if (stride[i] != sd) { cContiguous = 0; break; } /* contiguous, if it got this far */ if (dim == 0) { break; } sd *= dim; } /* check if fortran contiguous */ sd = elementStride; for (i = 0; i < length; ++i) { dim = shape[i]; if (stride[i] != sd) { isFortran = 0; } if (dim == 0) { break; } sd *= dim; } if (isFortran && cContiguous) return 'a'; else if (isFortran && !cContiguous) return 'f'; else if (!isFortran && !cContiguous) return 'c'; else return 'c'; } /** * Ensure that every value in the re arrange * array is unique * @param arr * @param shape * @param arrLength * @param shapeLength * @return */ template <typename T> INLINEDEF _CUDA_HD int checkArrangeArray(T *arr, int arrLength, int shapeLength) { if (arrLength != shapeLength) return -1; for (int i = 0; i < arrLength; i++) { if (arr[i] >= arrLength || arr[i] < 0) return -1; } for (int i = 0; i < arrLength; i++) { for (int j = 0; j < arrLength; j++) { if (i != j && arr[i] == arr[j]) return -1; } } return 1; } INLINEDEF _CUDA_HD void traceNew(int id) { //printf("new happened: [%i]\n", id); #ifndef __CUDACC__ //fflush(stdout); #endif } /** * Permute the shape information * @param info the shape information to permute * @param rearrange the order to re arrange * @param rank the rank of the rearrange array */ INLINEDEF _CUDA_HD void permute(ShapeInformation **info, int *rearrange, int rank) { ShapeInformation *infoDeref = *info; checkArrangeArray(rearrange, rank, rank); shape::doPermuteSwap(rank, &infoDeref->shape, rearrange); shape::doPermuteSwap(rank, &infoDeref->stride, rearrange); char order = getOrder(rank, infoDeref->shape, infoDeref->stride, infoDeref->elementWiseStride); infoDeref->order = order; } /** * Returns whether the * given shape is a vector or not * @param shape the shape of the array * @param rank the rank of the shape */ INLINEDEF _CUDA_HD int isVector(Nd4jLong *shape, int rank) { if (rank == 0) return 0; if (rank == 1) return 1; if (rank > 2) return 0; else if (rank <= 2) { if (shape[0] == 1 || shape[1] == 1) return 1; } return 0; } INLINEDEF _CUDA_HD bool isLikeVector(Nd4jLong *shapeInfo, int& posOfNonUnityDim) { int numOfNonUnity = 0; for(int i = 1; i <= shapeInfo[0]; ++i) { if(shapeInfo[i] != 1) { ++numOfNonUnity; posOfNonUnityDim = i-1; } } return numOfNonUnity == 1 && shapeInfo[0] > 2; } INLINEDEF _CUDA_HD bool isCommonVector(const Nd4jLong *shapeInfo, int& posOfNonUnityDim) { if(rank(shapeInfo) > 0 && length(shapeInfo) == 1) return true; int numOfNonUnity = 0; for(int i = 1; i <= shapeInfo[0]; ++i) { if(shapeInfo[i] != 1) { ++numOfNonUnity; posOfNonUnityDim = i-1; } } return numOfNonUnity == 1; } INLINEDEF _CUDA_H Nd4jLong* detachShape(Nd4jLong *originalShape) { Nd4jLong *newShape = new Nd4jLong[shape::shapeInfoLength(originalShape)]; memcpy(newShape, originalShape, shape::shapeInfoByteLength(originalShape)); return newShape; } INLINEDEF _CUDA_H Nd4jLong* copyShape(Nd4jLong *originalShape) { Nd4jLong *newShape = new 
Nd4jLong[shape::shapeInfoLength(originalShape)]; memcpy(newShape, originalShape, shape::shapeInfoByteLength(originalShape)); return newShape; } INLINEDEF _CUDA_HD int isVector(const Nd4jLong *shapeInfo) { return isVector(shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo)), shape::rank(shapeInfo)); } INLINEDEF _CUDA_HD bool isRowVector(const Nd4jLong *shapeInfo) { bool isVector = shape::isVector(shapeInfo) == 1; bool shapeFirstOne = shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo))[0] == 1; return isVector && shapeFirstOne; } INLINEDEF _CUDA_HD bool isColumnVector(Nd4jLong *shapeInfo) { bool isVector = shape::isVector(shapeInfo) == 1; bool shapeFirstOne = shape::shapeOf(shapeInfo)[0] == 1; return isVector && !shapeFirstOne; } INLINEDEF _CUDA_HD int oneDimEqualToLength(Nd4jLong *shape, int rank) { for(int i = 0; i < rank; i++) { if(shape[i] == shape::prod(shape,rank)) return 1; } return 0; } INLINEDEF _CUDA_HD int oneDimEqualToLength(Nd4jLong *shapeInfo) { return oneDimEqualToLength(shape::shapeOf(shapeInfo),shape::rank(shapeInfo)); } /** * Returns whether the * given shape is a vector or not * @param shape the shape of the array * @param rank the rank of the shape */ INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shape, int rank) { if (rank > 2) return 0; else if (rank <= 2) { if (shape[0] == 1 || shape[1] == 1) return 0; } return 1; } INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shapeInfo) { return isMatrix(shape::shapeOf(shapeInfo),shape::rank(shapeInfo)); } /** * Returns the shape portion of an information * buffer */ INLINEDEF _CUDA_HD Nd4jLong *shapeOf(Nd4jLong *buffer) { return buffer + 1; } /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. */ template <typename T> INLINEDEF _CUDA_HD T *copyOf(Nd4jLong length, T *toCopy) { traceNew(18); T *ret = new T[length]; return copyOf(length, toCopy, ret); } template <typename T> INLINEDEF _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy, T *ret) { memcpy(ret, toCopy, sizeof(T)*length); return ret; } /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. */ template <typename T> INLINEDEF _CUDA_HD void copyTo(Nd4jLong length, T *from, T *to) { memcpy(to, from, sizeof(T)*length); } /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. 
*/ INLINEDEF _CUDA_HD void copyTo(int length, Nd4jLong *from, Nd4jLong *to, Nd4jLong *indexes) { for(int i = 0; i < length; i++) { to[i] = from[indexes[i]]; } } /** * Permute the given strides * in the given rearrange order * @param toPermute the buffer to permute * @param shapeRank the length of the buffer to permute * @param rearrange the rearrange order (must be 0 based indexes * and all must be filled in) * @return the rearranged array */ /* INLINEDEF _CUDA_HD Nd4jLong *permutedStrides(Nd4jLong *toPermute, int shapeRank, int *rearrange) { Nd4jLong *strideCopy = copyOf(shapeRank, toPermute); checkArrangeArray(rearrange, shapeRank, shapeRank); Nd4jLong *newStride = doPermuteSwap(shapeRank, strideCopy, rearrange); delete[] strideCopy; return newStride; } */ /** * Return the slice (shape + 1 in pointer arithmetic) * @param shape the shape to take the slice of * @return the shape array - the first entry */ INLINEDEF _CUDA_HD Nd4jLong *slice(Nd4jLong *shape) { return shape + 1; } INLINEDEF _CUDA_HD int slices(Nd4jLong *shapeBuffer) { return static_cast<int>(shape::shapeOf(shapeBuffer)[0]); } INLINEDEF _CUDA_HD Nd4jLong *sliceOfShapeBuffer(Nd4jLong sliceIdx, Nd4jLong *shapeBuffer) { int rank = shape::rank(shapeBuffer); int newRank = rank - 1; if(newRank < 2) newRank = 2; Nd4jLong *newShapeBuffer = new Nd4jLong[shape::shapeInfoLength(newRank)]; newShapeBuffer[0] = newRank; Nd4jLong *currShape = shape::shapeOf(shapeBuffer); Nd4jLong *currStride = shape::stride(shapeBuffer); //initialize new shape and stride by taking the shape and stride + 1 //and adding to the shape information //a slice is always just taking the existing shape and cutting the first index off //of the shape and stride Nd4jLong *newShape = shape::shapeOf(newShapeBuffer); Nd4jLong *newStride = shape::stride(newShapeBuffer); if(shape::isVector(shapeBuffer)) { Nd4jLong *currShape = shape::shapeOf(shapeBuffer); //row vector: slice index 0 is a valid index, just copy the whole thing if(currShape[0] == 1) { if(sliceIdx == 0) { memcpy(newShapeBuffer,shapeBuffer,shape::shapeInfoByteLength(shape::rank(shapeBuffer))); return newShapeBuffer; } } //column vector: this will be a scalar else { delete[] newShapeBuffer; Nd4jLong *scalar = shape::createScalarShapeInfo(); int offset = shape::offset(shapeBuffer); scalar[shape::shapeInfoLength(2) - 3] = offset + sliceIdx; return scalar; } } else if(shape::isMatrix(shapeBuffer)) { newShape[0] = 1; newShape[1] = currShape[1]; newStride[0] = 1; newStride[1] = currStride[1]; } else { for(int i = 0; i < newRank; i++) { newShape[i] = currShape[i + 1]; newStride[i] = currStride[i + 1]; } } auto indices = new Nd4jLong[rank]; memset((void *) indices,0,rank * sizeof(Nd4jLong)); indices[0] = sliceIdx; Nd4jLong offset = shape::getOffset(0,newShape,newStride,indices,rank); newShapeBuffer[shape::shapeInfoLength(newRank) - 3] = offset; if(shape::isMatrix(shapeBuffer)) { newShapeBuffer[shape::shapeInfoLength(newRank) - 2] = currStride[1]; } else { newShapeBuffer[shape::shapeInfoLength(newRank) - 2] = shape::elementWiseStride(shapeBuffer); } newShapeBuffer[shape::shapeInfoLength(newRank) - 1] = shape::getOrder(newRank,newShape,newStride,1); delete[] indices; return newShapeBuffer; } /** * Returns the length of the * shape information buffer: * rank * 2 + 3 * @param rank the rank to get the shape * info length for * @return rank * 2 + 4 */ INLINEDEF _CUDA_HD int shapeInfoLength(int rank) { //FIXME magic numbers return rank * 2 + 4; } INLINEDEF _CUDA_HD int shapeInfoLength(Nd4jLong* shape) { return 
shapeInfoLength(static_cast<int>(shape[0])); } INLINEDEF _CUDA_HD int shapeInfoLength(const Nd4jLong* shape) { return shapeInfoLength(static_cast<int>(shape[0])); } INLINEDEF _CUDA_HD size_t shapeInfoByteLength(int rank) { //FIXME magic numbers return (rank * 2 + 4) * sizeof(Nd4jLong); } INLINEDEF _CUDA_HD size_t shapeInfoByteLength(const Nd4jLong* shapeInfo) { //FIXME magic numbers return shapeInfoByteLength((int) shapeInfo[0]); } /** * Returns the rank portion of * an information buffer */ INLINEDEF _CUDA_HD int rank(const Nd4jLong *buffer) { return static_cast<int>(buffer[0]); } INLINEDEF _CUDA_HD int rank(const int *buffer) { return buffer[0]; } INLINEDEF _CUDA_HD int rank(const unsigned int *buffer) { return static_cast<int>(buffer[0]); } INLINEDEF _CUDA_HD Nd4jLong* ews(Nd4jLong* shapeInfo) { return shapeInfo + 2 * shapeInfo[0] + 2; } /** * Converts a raw int buffer of the layout: * rank * shape * stride * offset * elementWiseStride * * where shape and stride are both straight int pointers */ INLINEDEF _CUDA_HD ShapeInformation *infoFromBuffer(Nd4jLong *buffer) { traceNew(19); auto info = new ShapeInformation; auto length = shapeInfoLength(rank(buffer)); auto rank = buffer[0]; //start after rank info->shape = buffer + 1; info->stride = buffer + (1 + rank); info->rank = rank; info->offset = buffer[length - 3]; info->elementWiseStride = buffer[length - 2]; Nd4jLong *stride = buffer + 1 + rank; info->stride = stride; info->order = (char) buffer[length - 1]; return info; } /** * Returns the stride portion of an information * buffer */ INLINEDEF _CUDA_HD Nd4jLong *stride(const Nd4jLong *buffer) { return const_cast<Nd4jLong*>(buffer) + (1 + rank(buffer)); } INLINEDEF _CUDA_HD bool isEmpty(const Nd4jLong *shapeInfo) { return ((shape::extra(const_cast<Nd4jLong*>(shapeInfo)) & ARRAY_EMPTY) == ARRAY_EMPTY); } /** * Compute the length of the given shape */ INLINEDEF _CUDA_HD Nd4jLong length(const Nd4jLong *shapeInfo) { int rank = shape::rank(shapeInfo); if (rank == 0) { if (isEmpty(shapeInfo)) return 0L; else return 1L; } if (rank == 1) return shapeInfo[1]; return shape::prodLong(shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo)), rank); } INLINEDEF _CUDA_HD Nd4jLong length(std::initializer_list<int>& shape) { Nd4jLong ret = 1; for (auto v : shape) { ret *= v; } return ret; } INLINEDEF _CUDA_HD Nd4jLong length(std::initializer_list<Nd4jLong>& shape) { Nd4jLong ret = 1; for (auto v : shape) { ret *= v; } return ret; } /*** * Returns the offset * portion of an information buffer */ INLINEDEF _CUDA_HD Nd4jLong offset(Nd4jLong *buffer) { return buffer[shape::shapeInfoLength(shape::rank(buffer)) - 3]; } INLINEDEF _CUDA_HD Nd4jLong& extra(Nd4jLong *buffer) { return buffer[shape::shapeInfoLength(shape::rank(buffer)) - 3]; } /** * Returns the ordering * for this shape information buffer */ INLINEDEF _CUDA_HD char order(const Nd4jLong *buffer) { //FIXME magic numbers return static_cast<char>(buffer[(buffer[0] * 2 + 4) - 1]); } /** * Returns type */ INLINEDEF _CUDA_HD Nd4jLong type(const Nd4jLong *shapeInfo) { return shapeInfo[2 * shapeInfo[0] + 1]; } /** * Returns the element wise stride for this information * buffer */ INLINEDEF _CUDA_HD Nd4jLong elementWiseStride(const Nd4jLong *buffer) { return buffer[shapeInfoLength(static_cast<int>(buffer[0])) - 2]; } /** * Returns the element wise stride for this information * buffer relative to a dimension and reduction index */ INLINEDEF _CUDA_HD Nd4jLong reductionIndexElementWiseStride(Nd4jLong* buffer, int* dimension, int dimensionLength) { if(dimensionLength > 
1) { if(shape::order(buffer) == 'f') { /** * The element wise stride belongs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along arr * we can use arr.stride(1) as a representation * along which to iterate. */ if(shape::shapeOf(buffer)[dimension[dimensionLength - 1]] != 1) { //int tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]]; //return tadElementWiseStride; auto tadElementWiseStride = shape::stride(buffer)[dimension[0]]; return tadElementWiseStride; } return 1; } else { /** * The element wise stride belongs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along arr * we can use arr.stride(1) as a representation * along which to iterate. */ if(shape::shapeOf(buffer)[dimension[dimensionLength - 1]] != 1) { auto tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]]; return tadElementWiseStride; } return 1; } } else { if(shape::order(buffer) == 'f') { /** * The element wise stride belongs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along arr * we can use arr.stride(1) as a representation * along which to iterate. */ auto tadElementWiseStride = shape::stride(buffer)[dimension[0]]; return tadElementWiseStride; } else { /** * The element wise stride belongs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along arr * we can use arr.stride(1) as a representation * along which to iterate. 
*/ auto tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]]; return tadElementWiseStride; } } } /** * Returns whether * the given shape info buffer * represents a scalar shape */ INLINEDEF _CUDA_HD int isScalar(Nd4jLong *info) { const int rank = shape::rank(info); if(rank > 2) return 0; if(rank == 0) return 1; if(rank == 1) return shape::shapeOf(info)[0] == 1; if(rank == 2) return shape::shapeOf(info)[0] == 1 && shape::shapeOf(info)[1] == 1; return 0; } /** * Returns whether * the given shape information * represents a scalar * shape or not */ INLINEDEF _CUDA_HD int isScalar(volatile ShapeInformation *info) { const int rank = info->rank; if(rank > 2) return 0; if(rank == 1) return info->shape[0] == 1; if(rank == 2) return info->shape[0] == 1 && info->shape[1] == 1; return 0; } /** * Return a copy of this array with the * given index omitted * * @param data the data to copy * @param indexes the index of the item to remove * @param dataLength the length of the data array * @param indexesLength the length of the data array * @return the new array with the omitted * * item */ template <typename T1, typename T2> INLINEDEF _CUDA_HD void removeIndex(T1* data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength, T1 *ret) { int count = 0; int absLength = dataLength - indexesLength; for (int i = 0; i < dataLength && count < absLength; i++) { int contains = 0; for (int j = 0; j < indexesLength; j++) { if (i == indexes[j]) { contains = 1; break; } } if (!contains) { ret[count] = data[i]; count++; } } } /** * Return a copy of this array with the * given index omitted * * @param data the data to copy * @param indexes the index of the item to remove * @param dataLength the length of the data array * @param indexesLength the length of the data array * @return the new array with the omitted * * item */ template <typename T1, typename T2> INLINEDEF _CUDA_HD T1* removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength) { auto lengthOfArr = dataLength - indexesLength; if(lengthOfArr < 0) { printf("Remove index call created a <= 0 length array. This was likely not intended."); } auto ret = new T1[lengthOfArr]; memset(ret,0,sizeof(T1) * lengthOfArr); removeIndex<T1, T2>(data, indexes, dataLength, indexesLength, ret); return ret; } INLINEDEF _CUDA_HD Nd4jLong* everyIndexBut(Nd4jLong *indexes,int indexesLength,int begin,int end) { int len = end - indexesLength; traceNew(20); auto ret = new Nd4jLong[len]; int retIdx = 0; //not here that we do 0 based indexing for end - this assumes things like: //0 to 4 are specified for(int i = begin; i < end ; i++) { bool found = false; for(int j = 0; j < indexesLength; j++) { if(indexes[j] == i) { found = true; break; } } if(!found) { ret[retIdx++] = i; } } return ret; } /** * Computes the offset for accessing * a global element given the shape information * and the offset to be read. */ #ifdef __CUDACC__ INLINEDEF __device__ int tadOffset(ShapeInformation *xInfo, int offset) { return offset + threadIdx.x * xInfo->elementWiseStride; } #endif /** * Returns a shape * forces the given length to be 2. 
* @param shape the shape to modify * @param dimension the dimension (row or column) * for the shape to be returned as * @return the new shape */ INLINEDEF _CUDA_HD Nd4jLong *ensureVectorShape(Nd4jLong *shape, int dimension) { traceNew(21); Nd4jLong *ret = new Nd4jLong[2]; if (dimension == 0) { ret[0] = 1; ret[1] = shape[0]; } else { ret[0] = shape[0]; ret[1] = 1; } return ret; } /** * Returns a shape * forces the given length to be 2. * @param shape the shape to modify * @param dimension the dimension (row or column) * for the shape to be returned as * @return the new shape */ INLINEDEF _CUDA_HD Nd4jLong *ensureVectorShape(Nd4jLong *shape) { return ensureVectorShape(shape, 0); } /** * This method does STRICT comparison for two shape buffers * * @param shape * @return */ INLINEDEF _CUDA_HD bool equalsStrict(const Nd4jLong *shapeA, const Nd4jLong *shapeB) { if (shapeA[0] != shapeB[0]) return false; if (shapeA[0] == 0) return true; // we do full comparison here int length = shape::shapeInfoLength(shapeA[0]); for (int e = 1; e < length; e++) if (shapeA[e] != shapeB[e]) return false; return true; } INLINEDEF _CUDA_HD bool haveSameOffsets(const Nd4jLong *shapeA, const Nd4jLong *shapeB) { if (shapeA[0] != shapeB[0]) return false; if (shapeA[0] == 0) return true; // we do full comparison here int length = shape::shapeInfoLength(shapeA[0]); for (int e = 1; e < length; e++) { if(e == (length - 3)) continue; // type position, neglect it if (shapeA[e] != shapeB[e]) return false; } return true; } INLINEDEF _CUDA_HD int sizeAt(const Nd4jLong *shape, const int dim) { if (dim >= 0) return shape[1+dim]; else return shape[1+(rank(shape) + dim)]; } /** * This method does SOFT comparison for two shape buffers, we compare only rank & shapes * * @param shape * @return */ INLINEDEF _CUDA_HD bool equalsSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB) { if (shapeA[0] != shapeB[0]) return false; if (shapeA[0] == 0) return true; // we compare only shapes, and ignoring stride & ews auto length = shapeA[0]; for (int e = 1; e <= length; e++) if (shapeA[e] != shapeB[e]) return false; return true; } INLINEDEF _CUDA_HD bool equalsTypesAndShapesSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB) { return equalsSoft(shapeA, shapeB) && shapeA[shapeInfoLength(shapeA) - 3] == shapeB[shapeInfoLength(shapeB) - 3]; } /** * Generate an int buffer * up to the given length * at the specified increment * */ template <typename T> INLINEDEF _CUDA_HD T* range(int from, int to, int increment) { int diff = nd4j::math::nd4j_abs<int>(from - to); int retLength = diff / increment; T *ret; traceNew(22); if(diff / increment < 1) ret = new T[1]; else ret = new T[diff / increment]; if (from < to) { int count = 0; for (int i = from; i < to; i += increment) { if (count >= retLength) break; ret[count++] = i; } } else if (from > to) { int count = 0; for (int i = from - 1; i >= to; i -= increment) { if (count >= retLength) break; ret[count++] = i; } } return ret; } /** * Generate a range * beginning at from and ending at to * incrementing by 1 * @param from the start * @param to the end * @return the int array starting at from and ending at to */ template <typename T> INLINEDEF _CUDA_HD T* range(int from, int to) { return range<T>(from, to, 1); } /** * Keep the given indexes in the data * @param data * @param index * @param indexLength * @param dataLength * @return */ INLINEDEF _CUDA_HD Nd4jLong *keep(volatile Nd4jLong *data, int* index, int indexLength, int dataLength) { traceNew(23); Nd4jLong *ret = new Nd4jLong[indexLength]; int count = 0; for 
(int i = 0; i < dataLength; i++) { int contains = 0; for (int j = 0; j < indexLength; j++) { if (i == index[j]) { contains = 1; break; } } if (contains) ret[count++] = data[i]; } return ret; } /** * Generate a reverse * copy of the data */ template <typename T> INLINEDEF _CUDA_HD T* reverseCopy(T *data, Nd4jLong length) { if (length < 1) return nullptr; traceNew(24); T *copy = new T[length]; for (Nd4jLong i = 0; i <= length / 2; i++) { T temp = data[i]; copy[i] = data[length - i - 1]; copy[length - i - 1] = temp; } return copy; } template <typename T> INLINEDEF _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong length) { if (length < 1) return; for (Nd4jLong i = 0; i <= length / 2; i++) { T temp = from[i]; to[i] = from[length - i - 1]; to[length - i - 1] = temp; } } template <typename T> INLINEDEF _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong *indexes, Nd4jLong length) { if (length < 1) return; for (Nd4jLong i = 0; i <= length / 2; i++) { T temp = from[indexes[i]]; to[i] = from[indexes[length - i - 1]]; to[length - i - 1] = temp; } } /** * * @param arr1 * @param arr1Length * @param arr2 * @param arr2Length * @return */ template <typename T> INLINEDEF _CUDA_HD T* concat(T* arr1, Nd4jLong arr1Length, T* arr2, Nd4jLong arr2Length) { traceNew(25); T *ret = new T[arr1Length + arr2Length]; std::memcpy(ret, arr1, arr1Length * sizeof(T)); std::memcpy(ret + arr1Length, arr2, arr2Length * sizeof(T)); return ret; } /** * * @param numArrays * @param numTotalElements * @param arr * @param lengths * @return */ template <typename T> INLINEDEF _CUDA_HD T *concat(Nd4jLong numArrays, Nd4jLong numTotalElements, T **arr, Nd4jLong *lengths) { T* ret = new T[numTotalElements]; Nd4jLong count = 0; for (Nd4jLong i = 0; i < numArrays; i++) { for (Nd4jLong j = 0; j < lengths[i]; j++) { ret[count++] = arr[i][j]; } } return ret; } /** * Get the length per slice of the * given shape and the dimension * @param rank the rank of the shape * @param shape the shape of to get * the length per slice for * @param dimension the dimension to * get the length per slice for * @param dimensionLength the length of the dimension array * @return the length per slice of the given shape * along the given dimension */ INLINEDEF _CUDA_HD Nd4jLong lengthPerSlice(int rank, Nd4jLong *shape, int* dimension, int dimensionLength) { if(shape::isVector(shape,rank)) { //return total length for row vectors if(dimensionLength == 1 && shape[0] == 1) { return shape::prod(shape,rank); } } else if(rank == dimensionLength) return shape::prod(shape,rank); int absSelta = nd4j::math::nd4j_abs<int>(rank - dimensionLength); traceNew(27); auto ret2 = shape::removeIndex<Nd4jLong>(shape, dimension, rank, dimensionLength); auto ret = prodLong(ret2, absSelta); delete[] ret2; return ret; } /** * calculates the offset for a tensor * @param index * @param arr * @param tensorShape * @return */ INLINEDEF _CUDA_HD Nd4jLong sliceOffsetForTensor(int rank, int index, Nd4jLong *shape, Nd4jLong *tensorShape, int tensorShapeLength, int* dimension, int dimensionLength) { auto tensorLength = prodLong(tensorShape, tensorShapeLength); auto lengthPerSlice2 = lengthPerSlice(rank, shape, dimension, dimensionLength); if (lengthPerSlice2 <= 0) { return 0; } Nd4jLong offset = index * tensorLength / lengthPerSlice2; return offset; } /** * calculates the offset for a tensor * @param index * @param arr * @param tensorShape * @return */ INLINEDEF _CUDA_HD Nd4jLong sliceOffsetForTensor(int index,int tensorLength,int lengthPerSlice2) { Nd4jLong offset = index * tensorLength / 
lengthPerSlice2; return offset; } #ifdef __CUDACC__ /** * Computes the offset for accessing * a global element given the shape information * and the offset to be read. */ INLINEDEF _CUDA_D int tadOffset(Nd4jLong *xInfo, int offset) { return offset + threadIdx.x * elementWiseStride(xInfo); } #endif /** * Computes the number * of tensors along * a given dimension */ INLINEDEF _CUDA_HD Nd4jLong tensorsAlongDimension(volatile int rank, volatile int length, volatile Nd4jLong *shape, int *dimension, int dimensionLength) { Nd4jLong *tensorShape = shape::keep(shape, dimension, dimensionLength, rank); Nd4jLong ret = length / shape::prodLong(tensorShape, dimensionLength); delete[] tensorShape; return ret; } /** * Computes the number * of tensors along * a given dimension */ INLINEDEF _CUDA_HD Nd4jLong tensorsAlongDimension(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) { Nd4jLong *keepShape = shape::shapeOf(shapeInfo); Nd4jLong *tensorShape = shape::keep(keepShape, dimension, dimensionLength, rank(shapeInfo)); Nd4jLong ret = shape::length(shapeInfo) / shape::prodLong(tensorShape, dimensionLength); delete[] tensorShape; return ret; } /** * Get an offset for retrieval * from a data buffer * based on the given * shape stride and given indices * @param baseOffset the offset to start from * @param shape the shape of the array * @param stride the stride of the array * @param indices the indices to iterate over * @return the double at the specified index */ INLINEDEF _CUDA_HD Nd4jLong getOffset(Nd4jLong baseOffset, const Nd4jLong *shape, const Nd4jLong *stride, const Nd4jLong *indices, int rank) { Nd4jLong offset = baseOffset; for(int i = 0; i < rank; i++) { if(indices[i] >= shape[i] && shape[i] != 1) { #ifdef __CUDA_ARCH__ printf("D: Index %i [%lld] must not be >= shape[%lld].\n", i,indices[i],shape[i]); #else printf("H: Index %i [%lld] must not be >= shape[%lld].\n", i, (long long) indices[i], (long long) shape[i]); #endif #ifdef __CUDA_ARCH__ //if (threadIdx.x == 0 && blockIdx.x == 0) // printShapeInfoLinear("getOffsetFailed", rank, shape, stride); #endif return -1; } if(shape[i] != 1) { offset += indices[i] * stride[i]; } } return offset; } /** * Returns the tensor along dimension * for the given block index * @param blockSize * @param blockIdx * @param i * @return */ INLINEDEF _CUDA_HD int tadForBlockIndex(int blockSize, int blockIdx, int i) { return blockIdx + i * blockSize; } /** * Computes the number of tads per block * */ INLINEDEF _CUDA_HD int tadsPerBlock(int blockSize, int tads) { return nd4j::math::nd4j_ceil<double, int>(tads / (double) blockSize); } /** * Returns a shape buffer * for the shape information metadata. 
*/ INLINEDEF _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info) { traceNew(29); auto ret = new Nd4jLong[shapeInfoLength(info->rank)]; int count = 1; int rank = info->rank; ret[0] = info->rank; for (int i = 0; i < rank; i++) { ret[count++] = info->shape[i]; } for (int i = 0; i < rank; i++) { ret[count++] = info->stride[i]; } ret[count++] = info->offset; ret[count++] = info->elementWiseStride; ret[count] = info->order; return ret; } INLINEDEF _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info, Nd4jLong* ret) { int count = 1; int rank = info->rank; ret[0] = info->rank; if (ret[0] == 0) { ret[1] = 0; ret[2] = 1; ret[3] = 99; return ret; } for (int i = 0; i < rank; i++) { ret[count++] = info->shape[i]; } for (int i = 0; i < rank; i++) { ret[count++] = info->stride[i]; } ret[count++] = info->offset; ret[count++] = info->elementWiseStride; ret[count++] = info->order; return ret; } INLINEDEF _CUDA_HD void printIntArray(const Nd4jLong *arr, const int length) { for(int i = 0; i < length; i++) { printf(" %lld ", (long long) arr[i]); } printf("\n"); } INLINEDEF _CUDA_HD void printIntArray(const int *arr, const int length) { for(int i = 0; i < length; i++) { printf(" %i ", arr[i]); } printf("\n"); } INLINEDEF _CUDA_HD void printShapeInfo(Nd4jLong *shapeInfo) { int rank = shape::rank(shapeInfo); Nd4jLong *shape = shape::shapeOf(shapeInfo); printf("Rank %d\n",rank); printf("Shape:\n"); for(int i = 0; i < rank; i++) { printf(" %lld ",(long long) shape[i]); } printf("\n"); Nd4jLong *stride = shape::stride(shapeInfo); printf("Stride:\n"); for(int i = 0; i < rank; i++) { printf(" %lld ", (long long) stride[i]); } printf("\n"); printf("Order %c\n",shape::order(shapeInfo)); } INLINEDEF _CUDA_HD void printShapeInfoLinear(const Nd4jLong *shapeInfo) { int rank = shape::rank(shapeInfo); int lim = shape::shapeInfoLength(rank); printf("ShapeInfo: ["); for (int i = 0; i < lim; i++) { printf("%lld", (long long) shapeInfo[i]); if (i < lim - 1) { printf(", "); } } printf("]\n"); #ifndef __CUDA_ARCH__ fflush(stdout); #endif } INLINEDEF _CUDA_HD void printShapeInfoLinear(const char *msg, int rank, const Nd4jLong *shape, const Nd4jLong *strides) { printf("%s : [", msg); for (int i = 0; i < rank; i++) { printf("%lld, ", (long long) shape[i]); } for (int i = 0; i < rank; i++) { printf("%lld", (long long) strides[i]); if (i < rank - 1) printf(", "); } printf("]\n"); #ifndef __CUDA_ARCH__ fflush(stdout); #endif } INLINEDEF _CUDA_HD void printShapeInfoLinear(const char *msg, const Nd4jLong *shapeInfo) { int rank = shape::rank(shapeInfo); int lim = shape::shapeInfoLength(rank); printf("%s : [", msg); for (int i = 0; i < lim; i++) { printf("%lld", (long long) shapeInfo[i]); if (i < lim - 1) { printf(", "); } } printf("]\n"); #ifndef __CUDACC__ fflush(stdout); #endif } template <typename T> INLINEDEF _CUDA_HD void printArray(void *varr,int length, const char * message) { auto arr = reinterpret_cast<T*>(varr); if (message != nullptr) printf("%s: [", message); else printf("Array: ["); for (int i = 0; i < length; i ++) { printf("%f", (float) arr[i]); if (i + 1 < length) printf(", "); } printf("]\n"); #ifndef __CUDACC__ fflush(stdout); #endif } INLINEDEF _CUDA_HD void printArray(float *arr,int length) { printf("Array: ["); for (int i = 0; i < length; i ++) { printf("%f", arr[i]); if (i + 1 < length) printf(", "); } printf("]\n"); } /** * Given an linear index, element wise stride * and the length of each tad * map a linear index to a tad * @param i the index to map * @param the element wise stride for the tads * @param 
numElementsPerTad the number of elements * per tad */ INLINEDEF _CUDA_HD int tadIndex(int i, int elementWiseStride, int numElementsPerTad) { return i / (numElementsPerTad * elementWiseStride); } /** * Map a tad to a * reduction index. * @param tadIndexForOriginal the original tad index for the * split up problem (eg: split is dimension 3 mapping to a 2,3 problem) * @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3) * @param tadsForOriginal the number of tads for the smaller problem (eg: 3) */ INLINEDEF _CUDA_HD int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced, int tadsForOriginal) { if (tadIndexForOriginal == 0) return 0; return tadIndexForOriginal / (tadsForOriginal / tadsForReduced); } INLINEDEF _CUDA_HD void transposeInplace(Nd4jLong *shapeBuffer) { int rank = shape::rank(shapeBuffer); Nd4jLong *shape = shape::shapeOf(shapeBuffer); Nd4jLong *strides = shape::stride(shapeBuffer); // swap shape for (int e = 0; e < rank / 2; e++) { int idx1 = rank - e - 1; int idx2 = e; int tmp = shape[idx2]; shape[idx2] = shape[idx1]; shape[idx1] = tmp; } // swap strides for (int e = 0; e < rank / 2; e++) { int idx1 = rank - e - 1; int idx2 = e; int tmp = strides[idx2]; strides[idx2] = strides[idx1]; strides[idx1] = tmp; } if (shape::order(shapeBuffer) == 'c') shapeBuffer[shape::shapeInfoLength(shapeBuffer) - 1] = 102; else shapeBuffer[shape::shapeInfoLength(shapeBuffer) - 1] = 99; } /** * Tad index for linear * @param linearIndex * @param tadLength * @return */ INLINEDEF _CUDA_HD int tadIndexForLinear(int linearIndex, int tadLength) { return linearIndex % tadLength; } /** * Computes the number of tads * per reduce index for the * reduction tad. */ INLINEDEF _CUDA_HD int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal) { return tadsForOriginal / tadsForReduce; } /** * Maps a linear index to a reduction index * @param i the linear index to map * @param elementWiseStride the element wise stride * for the multiple problem * @param tadNum the number of tads for the shrunken problem * @param originalTadNum the tad number for the reduced version of the problem */ INLINEDEF _CUDA_HD int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad, int tadNum, int originalTadNum) { int tad = tadIndex(i, elementWiseStride, numElementsPerTad); return reductionIndexForTad(tad, tadNum, originalTadNum); } INLINEDEF _CUDA_HD Nd4jLong* createScalarShapeInfo() { traceNew(30); auto shape = new Nd4jLong[1]; shape[0] = 1; auto stride = new Nd4jLong[1]; stride[0] = 1; auto shapeInformation2 = new ShapeInformation(); shapeInformation2->rank = 1; shapeInformation2->offset = 0; shapeInformation2->stride = stride; shapeInformation2->shape = shape; shapeInformation2->elementWiseStride = 1; shapeInformation2->order = 99; Nd4jLong *ret = shape::toShapeBuffer(shapeInformation2); delete shapeInformation2; delete[] shape; delete[] stride; return ret; } INLINEDEF _CUDA_HD Nd4jLong* createScalarShapeInfo(Nd4jLong *ret) { ret[0] = 2; ret[1] = 1; ret[2] = 1; ret[3] = 1; ret[4] = 1; ret[5] = 0; ret[6] = 1; ret[7] = 99; return ret; } /** * Returns the prod of the data * up to the given length */ INLINEDEF _CUDA_HD int prod(Nd4jLong *data, int length) { int prod = 1; for (int i = 0; i < length; i++) { prod *= data[i]; } return prod; } /** * Returns the prod of the data * up to the given length */ INLINEDEF _CUDA_HD Nd4jLong prodLong(const Nd4jLong *data, int length) { Nd4jLong prod = 1; for (int i = 0; i < length; i++) { prod *= data[i]; } return prod; } INLINEDEF 
_CUDA_HD int rearMostLeftOverItem(Nd4jLong *data, Nd4jLong *dimension,int dimensionLength) { Nd4jLong *stride = shape::stride(data); //corner case: return the final item when its greater than the max, since its guaranteed to be left over //note here that strides are interpreted in reverse for tad //start from the front rather than the back int rank = shape::rank(data); if(shape::order(data) == 'f') { int dimIdx = dimensionLength - 1; for(int i = rank - 1; i >= 0; i--) { /** * Needs to find an algorithm such that: * looping backwards will find the highest dimension left * that isn't included in the dimension index list. * * This can also be thought of as the last item of the first index * of the difference between the full list of indices and * the dimension indices. * * We should avoid excessive object creation by only looping backwards. */ if(dimension[dimIdx--] != i) { int ret = stride[i]; return ret; } } } else { int dimIdx = dimensionLength - 1; for(int i = rank - 1; i >= 0; i--) { /** * Needs to find an algorithm such that: * looping backwards will find the highest dimension left * that isn't included in the dimension index list. * * This can also be thought of as the last item of the first index * of the difference between the full list of indices and * the dimension indices. * * We should avoid excessive object creation by only looping backwards. */ if(dimension[dimIdx--] != i) { int ret = stride[i]; return ret; } } } int ret = stride[0]; return ret; } #ifdef __CUDACC__ __device__ INLINEDEF void sweepShapeInfoBuffer(Nd4jLong *shapeInfoBuffer, Nd4jLong *targetBuffer) { // we read first element, to find out length of our shapeInfoBuffer int rank = shapeInfoBuffer[0]; int len = shape::shapeInfoLength(rank); for (int i = threadIdx.x; i < len; i += blockDim.x) targetBuffer[i] = shapeInfoBuffer[i]; } #endif INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpy(cnpy::NpyArray arr) { return shape::shapeBufferOfNpy(arr.shape.size(),(unsigned int*) arr.shape.data(),arr.fortranOrder); } // INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpyBuffer(char *buffer) { // unsigned Nd4jLong *shape; // unsigned int ndims, wordSize; // bool fortranOrder; // cnpy::parseNpyHeaderStr(std::string(buffer),wordSize,shape,ndims,fortranOrder); // Nd4jLong * ret = shape::shapeBufferOfNpy(ndims,shape,fortranOrder); // delete[] shape; // return ret; // } INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpy(int rank, unsigned int* shape,bool fortranOrder) { if(fortranOrder) { Nd4jLong *shapeBufferRet = shape::shapeBufferFortran(rank, nd4j::FLOAT32,(Nd4jLong *) shape); return shapeBufferRet; } else { Nd4jLong *newShape = new Nd4jLong[rank]; for(int i = 0; i < rank; i++) { newShape[i] = shape[i]; } Nd4jLong *shapeBufferRet = shape::shapeBuffer(rank, nd4j::FLOAT32, newShape); delete[] newShape; return shapeBufferRet; } } INLINEDEF _CUDA_HD bool strideDescendingCAscendingF(const Nd4jLong *shapeBuffer) { int rank = shape::rank(shapeBuffer); Nd4jLong *strides = shape::stride(const_cast<Nd4jLong*>(shapeBuffer)); char order = shape::order(shapeBuffer); if (shape::isRowVector(shapeBuffer) && strides[0] == 1 && strides[1] == 1) return true; if (order == 'c') { for (int i = 1; i < rank; i++) if (strides[i-1] <= strides[i]) return false; return true; } else if (order == 'f') { for (int i = 1; i < rank; i++) if (strides[i-1] >= strides[i]) return false; return true; } else { printf("Unknown order for array!\n"); return false; } } INLINEDEF _CUDA_HD bool isStrideSimple(const Nd4jLong* shapeInfo) { return (order(shapeInfo) == 'c') && 
(elementWiseStride(shapeInfo) > 0); } ////////////////////////////////////////////////////////////////////////// // copy-past from java hasDefaultStridesForShape function INLINEDEF _CUDA_HD bool areStridesDefault(const Nd4jLong* shapeInfo) { const int rank = shape::rank(shapeInfo); if(rank == 0) return true; if(!strideDescendingCAscendingF(shapeInfo)) return false; Nd4jLong defaultShapeInfo[MAX_SHAPEINFOLENGTH]; memcpy(defaultShapeInfo, shapeInfo, shape::shapeInfoByteLength(shapeInfo)); shape::updateStrides(defaultShapeInfo, shape::order(shapeInfo)); bool result = true; for(int i = rank+1; i <= 2*rank; ++i) if(defaultShapeInfo[i] != shapeInfo[i]) { result = false; break; } return result; } // INLINEDEF _CUDA_H bool reshapeC(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShapeOf, bool isFOrder, Nd4jLong* target) { // int oldnd; // Nd4jLong* olddims = shape::copyOf(oldRank, shape::shapeOf(oldShape)); // Nd4jLong* oldstrides = shape::copyOf(oldRank, shape::stride(oldShape)); // int np, op, last_stride; // int oi, oj, ok, ni, nj, nk; // Nd4jLong* newStrides = new Nd4jLong[newRank]; // oldnd = 0; // /* // * Remove axes with dimension 1 from the old array. They have no effect // * but would need special cases since their strides do not matter. // */ // for (oi = 0; oi < oldRank; oi++) { // if (shape::shapeOf(oldShape)[oi] != 1) { // olddims[oldnd] = shape::shapeOf(oldShape)[oi]; // oldstrides[oldnd] = shape::stride(oldShape)[oi]; // oldnd++; // } // } // np = 1; // for (ni = 0; ni < newRank; ni++) { // np *= newShapeOf[ni]; // } // op = 1; // for (oi = 0; oi < oldnd; oi++) { // op *= olddims[oi]; // } // if (np != op) { // /* different total sizes; no hope */ // delete[] olddims; // delete[] oldstrides; // delete[] newStrides; // return false; // } // if (np == 0) { // /* the current code does not handle 0-sized arrays, so give up */ // delete[] olddims; // delete[] oldstrides; // delete[] newStrides; // return false; // } // /* oi to oj and ni to nj give the axis ranges currently worked with */ // oi = 0; // oj = 1; // ni = 0; // nj = 1; // while (ni < newRank && oi < oldnd) { // np = newShapeOf[ni]; // op = olddims[oi]; // while (np != op) { // if (np < op) { // /* Misses trailing 1s, these are handled later */ // np *= newShapeOf[nj++]; // } else { // op *= olddims[oj++]; // } // } // /* Check whether the original axes can be combined */ // for (ok = oi; ok < oj - 1; ok++) { // if (isFOrder) { // if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) { // /* not contiguous enough */ // delete[] olddims; // delete[] oldstrides; // delete[] newStrides; // return false; // } // } else { // /* C order */ // if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) { // /* not contiguous enough */ // delete[] olddims; // delete[] oldstrides; // delete[] newStrides; // return false; // } // } // } // /* Calculate new strides for all axes currently worked with */ // if (isFOrder) { // newStrides[ni] = oldstrides[oi]; // for (nk = ni + 1; nk < nj; nk++) { // newStrides[nk] = newStrides[nk - 1] * newShapeOf[nk - 1]; // } // } else { // /* C order */ // newStrides[nj - 1] = oldstrides[oj - 1]; // for (nk = nj - 1; nk > ni; nk--) { // newStrides[nk - 1] = newStrides[nk] * newShapeOf[nk]; // } // } // ni = nj++; // oi = oj++; // } // if (ni >= 1) { // last_stride = newStrides[ni - 1]; // } else { // last_stride = shape::elementWiseStride(oldShape); // } // if (isFOrder && ni >= 1) { // last_stride *= newShapeOf[ni - 1]; // } // for (nk = ni; nk < newRank; nk++) { // 
newStrides[nk] = last_stride; // } // target[0] = newRank; // int cnt = 1; // for (int e = 0; e < newRank; e++) // target[cnt++] = newShapeOf[e]; // for (int e = 0; e < newRank; e++) // target[cnt++] = newStrides[e]; // target[shape::shapeInfoLength(newRank) - 3] = 0; // target[shape::shapeInfoLength(newRank) - 2] = 0; // target[shape::shapeInfoLength(newRank) - 1] = isFOrder ? 102 : 99; // nd4j::ArrayOptions::setDataType(target, nd4j::ArrayOptions::dataType(oldShape)); // delete[] olddims; // delete[] oldstrides; // delete[] newStrides; // return true; // } // INLINEDEF _CUDA_H bool reshapeC(const int oldRank, const Nd4jLong* oldShapeInfo, const int newRank, const Nd4jLong* newShape, const bool isFOrder, Nd4jLong* newShapeInfo) { // // PLEASE NOTE !: reshaping not-permuted (ews=1) array in f order (except insertion/elimination of unities) will definitely cause allocation of new buffer for array elements // // also this function takes into account identical shapes automatically, namely in that case oldShapeInfo is completely copied to newShapeInfo // const int newOrder = isFOrder ? 102 : 99; // const int oldOrder = oldShapeInfo[2 * oldRank + 3]; // newShapeInfo[0] = newRank; // memcpy(newShapeInfo + 1, newShape, newRank * sizeof(Nd4jLong)); // Nd4jLong* newStrides = shape::stride(newShapeInfo); // const Nd4jLong* oldShape = shape::shapeOf(const_cast<Nd4jLong*>(oldShapeInfo)); // const Nd4jLong* oldStrides = shape::stride(const_cast<Nd4jLong*>(oldShapeInfo)); // int oldStart(0), oldStop(1), newStart(0), newStop(1), newDim, oldDim; // while (newStart < newRank && oldStart < oldRank) { // newDim = newShape[newStart]; // oldDim = oldShape[oldStart]; // while (newDim != oldDim) // if (newDim < oldDim) newDim *= newShape[newStop++]; // else oldDim *= oldShape[oldStop++]; // // ------ Check whether the original axes can be combined ------ // // for (int i = oldStart; i < oldStop - 1; i++) { // if(oldShape[i] == 1) { // ignore strides like {...,1,1,...} // if(oldOrder == 102) ++oldStart; // continue; // } // if(oldOrder == 102 && oldStrides[i + 1] != oldShape[i] * oldStrides[i]) // return false; // not contiguous enough // if(oldOrder == 99 && oldStrides[i] != oldShape[i + 1] * oldStrides[i + 1]) // return false; // not contiguous enough // } // // ------ Calculate new strides for all axes currently worked with ------ // // if(isFOrder) { // newStrides[newStart] = oldStrides[oldStart]; // for (int i = newStart + 1; i < newStop; ++i) // newStrides[i] = newStrides[i - 1] * newShape[i - 1]; // } // else { // newStrides[newStop - 1] = oldStrides[oldStop - 1]; // for (int i = newStop - 1; i > newStart; --i) // newStrides[i - 1] = newStrides[i] * newShape[i]; // } // newStart = newStop++; // oldStart = oldStop++; // } // newShapeInfo[2 * newRank + 3] = shape::order(oldShapeInfo); // order // newShapeInfo[2 * newRank + 2] = shape::elementWiseStride(oldShapeInfo); // ews // newShapeInfo[2 * newRank + 1] = shape::type(oldShapeInfo); // type // return true; // } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_H bool reshapeC(const int oldRank, const Nd4jLong* oldShapeInfo, const int newRank, const Nd4jLong* newShape, Nd4jLong* newShapeInfo) { // PLEASE NOTE !: reshaping not-permuted (ews=1) array in f order (except insertion/elimination of unities) will definitely cause allocation of new buffer for array elements // also this function takes into account identical shapes automatically, namely in that case oldShapeInfo is completely copied to newShapeInfo newShapeInfo[0] 
= newRank; memcpy(newShapeInfo + 1, newShape, newRank * sizeof(Nd4jLong)); Nd4jLong* newStrides = shape::stride(newShapeInfo); const Nd4jLong* oldShape = shape::shapeOf(const_cast<Nd4jLong*>(oldShapeInfo)); const Nd4jLong* oldStrides = shape::stride(const_cast<Nd4jLong*>(oldShapeInfo)); int oldStart(0), oldStop(1), newStart(0), newStop(1), newDim, oldDim; while (newStart < newRank && oldStart < oldRank) { newDim = newShape[newStart]; oldDim = oldShape[oldStart]; while (newDim != oldDim) if (newDim < oldDim) newDim *= newShape[newStop++]; else oldDim *= oldShape[oldStop++]; // ------ Check whether the original axes can be combined ------ // for (int i = oldStart; i < oldStop - 1; i++) if(oldShape[i] != 1 && oldStrides[i] != oldShape[i + 1] * oldStrides[i + 1]) // oldShape[i] != 1 ---> ignore strides like {...,1,1,...} return false; // not contiguous enough newStrides[newStop - 1] = oldStrides[oldStop - 1]; for (int i = newStop - 1; i > newStart; --i) newStrides[i - 1] = newStrides[i] * newShape[i]; newStart = newStop++; oldStart = oldStop++; } newShapeInfo[2 * newRank + 3] = shape::order(oldShapeInfo); // order newShapeInfo[2 * newRank + 2] = shape::elementWiseStride(oldShapeInfo); // ews newShapeInfo[2 * newRank + 1] = shape::type(oldShapeInfo); // type return true; } INLINEDEF _CUDA_H bool canReshape(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShapeOf, bool isFOrder) { int oldnd; Nd4jLong* oldDims = shape::copyOf(oldRank, shape::shapeOf(oldShape)); Nd4jLong* oldStrides = shape::copyOf(oldRank, shape::stride(oldShape)); int np, op, last_stride; int oldStart, oldStop, ok, newStart, newStop, nk; auto newStrides = new Nd4jLong[newRank]; oldnd = 0; /* * Remove axes with dimension 1 from the old array. They have no effect * but would need special cases since their strides do not matter. 
*/ for (oldStart = 0; oldStart < oldRank; oldStart++) { if (shape::shapeOf(oldShape)[oldStart] != 1) { oldDims[oldnd] = shape::shapeOf(oldShape)[oldStart]; oldStrides[oldnd] = shape::stride(oldShape)[oldStart]; oldnd++; } } np = 1; for (newStart = 0; newStart < newRank; newStart++) { np *= newShapeOf[newStart]; } op = 1; for (oldStart = 0; oldStart < oldnd; oldStart++) { op *= oldDims[oldStart]; } if (np != op) { /* different total sizes; no hope */ delete[] oldDims; delete[] oldStrides; delete[] newStrides; return false; } if (np == 0) { /* the current code does not handle 0-sized arrays, so give up */ delete[] oldDims; delete[] oldStrides; delete[] newStrides; return false; } /* oldStart to oldStop and newStart to newStop give the axis ranges currently worked with */ oldStart = 0; oldStop = 1; newStart = 0; newStop = 1; while (newStart < newRank && oldStart < oldnd) { np = newShapeOf[newStart]; op = oldDims[oldStart]; while (np != op) { if (np < op) { /* Misses trailing 1s, these are handled later */ np *= newShapeOf[newStop++]; } else { op *= oldDims[oldStop++]; } } /* Check whether the original axes can be combined */ for (ok = oldStart; ok < oldStop - 1; ok++) { if (isFOrder) { if (oldStrides[ok + 1] != oldDims[ok] * oldStrides[ok]) { /* not contiguous enough */ delete[] oldDims; delete[] oldStrides; delete[] newStrides; return false; } } else { /* C order */ if (oldStrides[ok] != oldDims[ok + 1] * oldStrides[ok + 1]) { /* not contiguous enough */ delete[] oldDims; delete[] oldStrides; delete[] newStrides; return false; } } } /* Calculate new strides for all axes currently worked with */ if (isFOrder) { newStrides[newStart] = oldStrides[oldStart]; for (nk = newStart + 1; nk < newStop; nk++) { newStrides[nk] = newStrides[nk - 1] * newShapeOf[nk - 1]; } } else { /* C order */ newStrides[newStop - 1] = oldStrides[oldStop - 1]; for (nk = newStop - 1; nk > newStart; nk--) { newStrides[nk - 1] = newStrides[nk] * newShapeOf[nk]; } } newStart = newStop++; oldStart = oldStop++; } delete[] oldDims; delete[] oldStrides; delete[] newStrides; return true; } // this function checks the consistence of dimensions with array rank (negative dimensions, too large dimensions, too big number of dimensions) // also it sorts input array of dimensions, this operation is also necessary for creating TAD object INLINEDEF _CUDA_H void checkDimensions(const int rank, std::vector<int>& dimensions) { int dimSize = dimensions.size(); if(dimSize == 0) throw std::runtime_error("shape::checkDimensions method: array of dimensions is empty!"); // check presence of negative dimensions and if they are present transform them to positive ones -dim -> rank - |dim| for(auto& dim : dimensions) if(dim < 0) dim += rank; // sort input array of dimensions, this operation is also necessary for creating TAD object in external methods if (dimSize > 1) { std::sort(dimensions.begin(), dimensions.end()); // remove duplicates if they are present dimensions.erase(std::unique(dimensions.begin(), dimensions.end()), dimensions.end()); } // check whether number of dimensions is to big (>rank) dimSize = dimensions.size(); if(dimSize > rank) throw std::runtime_error("shape::checkDimensions method: number of input dimensions is too big ( > rank of array)!"); // check if min dimension is still negative and whether max dimension is bigger then rank-1 if(dimensions[0] < 0 || dimensions.back() > (rank-1)) throw std::runtime_error("shape::checkDimensions method: the negative dimension is still present in input array after transform or the too big 
dimension is present ( > rank of array) !"); } // max array is outer for min array, min array is sub-array of max array // function calculates the coordinates of min array (and saves them into minIdxs) given coordinates of max array (already stored in maxIdxs) INLINEDEF _CUDA_HD void maxIndToMinInd(Nd4jLong* maxIdxs, Nd4jLong* minIdxs, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude, int dimsLen) { const auto maxRank = shape::rank(maxShapeInfo); const auto minRank = shape::rank(minShapeInfo); // if(minRank >= maxRank) // throw std::runtime_error("shape::maxIndToMinInd method: rank of min array should be smaller then rank of max array!"); if(dimsLen == -1) dimsLen = maxRank - minRank; // if size is not given (= -1) then it is equal to ranks difference if(maxRank == minRank) { if(dimsToExclude == nullptr) { // --> means dimsToExclude == {0,1,2,...,dimsLen-1} for (int i = 0; i < maxRank; ++i) { if(i < dimsLen) minIdxs[i] = maxIdxs[i]; else { if(maxIdxs[i] > minShapeInfo[i + 1]) minIdxs[i] = maxIdxs[i] % minShapeInfo[i + 1]; else if(maxIdxs[i] == minShapeInfo[i + 1]) minIdxs[i] = 0; else minIdxs[i] = maxIdxs[i]; } } } else { for (int i = 0, dim = 0; i < maxRank; ++i) { if(dim < dimsLen && dimsToExclude[dim] == i) { minIdxs[i] = maxIdxs[i]; ++dim; continue; } if(maxIdxs[i] > minShapeInfo[i + 1]) minIdxs[i] = maxIdxs[i] % minShapeInfo[i + 1]; else if(maxIdxs[i] == minShapeInfo[i + 1]) minIdxs[i] = 0; else minIdxs[i] = maxIdxs[i]; } } } else { if(dimsToExclude == nullptr) { // --> means dimsToExclude == {0,1,2,...,dimsLen-1} for (int i = 0; i < minRank; ++i) { if(maxIdxs[i + dimsLen] > minShapeInfo[i + 1]) minIdxs[i] = maxIdxs[i + dimsLen] % minShapeInfo[i + 1]; else if(maxIdxs[i + dimsLen] == minShapeInfo[i + 1]) minIdxs[i] = 0; else minIdxs[i] = maxIdxs[i + dimsLen]; } } else { for (int minI = 0, maxI = 0, dim = 0; maxI < maxRank; ++maxI) { if(dim < dimsLen && dimsToExclude[dim] == maxI) { ++dim; continue; } if(maxIdxs[maxI] == minShapeInfo[minI + 1]) minIdxs[minI] = 0; else if(maxIdxs[maxI] > minShapeInfo[minI + 1]) minIdxs[minI] = maxIdxs[maxI] % minShapeInfo[minI + 1]; else minIdxs[minI] = maxIdxs[maxI]; ++minI; } } } } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_HD Nd4jLong subArrayIndex(const Nd4jLong maxIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude, const int dimsLen) { Nd4jLong maxIdxs[MAX_RANK]; if(shape::order(maxShapeInfo) == 'c') shape::ind2subC(shape::rank(maxShapeInfo), const_cast<Nd4jLong *>(maxShapeInfo)+1, const_cast<Nd4jLong&>(maxIdx), maxIdxs); else shape::ind2sub(shape::rank(maxShapeInfo), const_cast<Nd4jLong *>(maxShapeInfo)+1, const_cast<Nd4jLong&>(maxIdx), maxIdxs); Nd4jLong minIdxs[MAX_RANK]; maxIndToMinInd(maxIdxs, minIdxs, maxShapeInfo, minShapeInfo, dimsToExclude, dimsLen); return sub2Ind(shape::rank(minShapeInfo), minShapeInfo + 1, minIdxs); } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_HD Nd4jLong subArrayOffset(const Nd4jLong maxIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude, const int dimsLen) { Nd4jLong maxIdxs[MAX_RANK]; if(shape::order(maxShapeInfo) == 'c') shape::ind2subC(shape::rank(maxShapeInfo), const_cast<Nd4jLong *>(maxShapeInfo)+1, const_cast<Nd4jLong&>(maxIdx), maxIdxs); else shape::ind2sub(shape::rank(maxShapeInfo), const_cast<Nd4jLong *>(maxShapeInfo)+1, const_cast<Nd4jLong&>(maxIdx), maxIdxs); Nd4jLong minIdxs[MAX_RANK]; maxIndToMinInd(maxIdxs, 
minIdxs, maxShapeInfo, minShapeInfo, dimsToExclude, dimsLen); return getOffset(0, minShapeInfo + 1, minShapeInfo + shape::rank(minShapeInfo) + 1, minIdxs, shape::rank(minShapeInfo)); } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_HD int outerArrayOffsets(Nd4jLong* maxOffsets, const Nd4jLong minIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude) { const auto rankMin = shape::rank(minShapeInfo); const auto rankMax = shape::rank(maxShapeInfo); // if(rankMin >= rankMax) // throw std::runtime_error("shape::subArrayIndex method: rank of min array should be smaller then rank of max array!"); // if(rankMax > MAX_RANK/2) // throw std::runtime_error("shape::subArrayIndex method: rank of max array should be <= MAX_RANK/2 !"); const auto diff = rankMax - rankMin; // the size of dimsToExclude is equal to diff Nd4jLong buffer[MAX_RANK]; Nd4jLong* indices = buffer; Nd4jLong* increment = buffer + MAX_RANK/2; int N, minI, maxI; // calculate min per-dim-indices which corresponds to absolute minIdx index if(order(minShapeInfo) == 'c') shape::ind2subC(rankMin, minShapeInfo + 1, minIdx, indices); else shape::ind2sub(rankMin, const_cast<Nd4jLong*>(minShapeInfo) + 1, minIdx, indices); // transform storage indices to contain per-dim max indices, purpose - memory saving // fill increment array as well if(dimsToExclude == nullptr) { // means dimsToExclude == {0,1,2,...,diff-1} for(minI = rankMin - 1, maxI = rankMax-1; maxI >= diff; --maxI, --minI) { increment[maxI] = (maxShapeInfo[maxI+1] == minShapeInfo[minI+1]) ? 0 : minShapeInfo[minI+1]; indices[maxI] = indices[minI]; } for(maxI = 0; maxI < diff; ++maxI) { increment[maxI] = 1; indices[maxI] = 0; } } else { for(N = diff-1, minI = rankMin - 1, maxI = rankMax - 1; maxI >= 0; --maxI) { if(N >= 0 && dimsToExclude[N] == maxI) { increment[maxI] = 1; indices[maxI] = 0; --N; } else { increment[maxI] = (maxShapeInfo[maxI+1] == minShapeInfo[minI+1]) ? 
0 : minShapeInfo[minI+1]; indices[maxI] = indices[minI--]; } } } maxI = rankMax-1; N = 0; int step; maxOffsets[N++] = shape::getOffset(0, maxShapeInfo + 1, maxShapeInfo + rankMax + 1, indices, rankMax); // nested loops - producing of absolute indices for max array while(maxI >= 0) { if(increment[maxI] != 0) { indices[maxI] += increment[maxI]; if(indices[maxI] >= maxShapeInfo[maxI+1]) { indices[maxI] %= increment[maxI]; // restore initial value of indices[maxI] step = -1; } else { maxOffsets[N++] = shape::getOffset(0, maxShapeInfo + 1, maxShapeInfo + rankMax + 1, indices, rankMax); step = rankMax - 1 - maxI; } } else if(maxI == rankMax - 1) step = -1; maxI += step; } return N; } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_HD int outerArrayIndexes(Nd4jLong* maxIdxs, const Nd4jLong minIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude) { const auto rankMin = shape::rank(minShapeInfo); const auto rankMax = shape::rank(maxShapeInfo); // if(rankMin >= rankMax) // throw std::runtime_error("shape::subArrayIndex method: rank of min array should be smaller then rank of max array!"); // if(rankMax > MAX_RANK/2) // throw std::runtime_error("shape::subArrayIndex method: rank of max array should be <= MAX_RANK/2 !"); const auto diff = rankMax - rankMin; // the size of dimsToExclude is equal to diff Nd4jLong buffer[MAX_RANK]; Nd4jLong* indices = buffer; Nd4jLong* increment = buffer + MAX_RANK/2; int N, minI, maxI; // calculate min per-dim-indices which corresponds to absolute minIdx index if(order(minShapeInfo) == 'c') shape::ind2subC(rankMin, minShapeInfo + 1, minIdx, indices); else shape::ind2sub(rankMin, const_cast<Nd4jLong*>(minShapeInfo) + 1, minIdx, indices); // transform storage indices to contain per-dim max indices, purpose - memory saving // fill increment array as well if(dimsToExclude == nullptr) { // means dimsToExclude == {0,1,2,...,diff-1} for(minI = rankMin - 1, maxI = rankMax-1; maxI >= diff; --maxI, --minI) { increment[maxI] = (maxShapeInfo[maxI+1] == minShapeInfo[minI+1]) ? 0 : minShapeInfo[minI+1]; indices[maxI] = indices[minI]; } for(maxI = 0; maxI < diff; ++maxI) { increment[maxI] = 1; indices[maxI] = 0; } } else { for(N = diff-1, minI = rankMin - 1, maxI = rankMax - 1; maxI >= 0; --maxI) { if(N >= 0 && dimsToExclude[N] == maxI) { increment[maxI] = 1; indices[maxI] = 0; --N; } else { increment[maxI] = (maxShapeInfo[maxI+1] == minShapeInfo[minI+1]) ? 
0 : minShapeInfo[minI+1]; indices[maxI] = indices[minI--]; } } } maxI = rankMax-1; N = 0; int step; maxIdxs[N++] = sub2Ind(rankMax, maxShapeInfo + 1, indices); // nested loops - producing of absolute indices for max array while(maxI >= 0) { if(increment[maxI] != 0) { indices[maxI] += increment[maxI]; if(indices[maxI] >= maxShapeInfo[maxI+1]) { indices[maxI] %= increment[maxI]; // restore initial value of indices[maxI] step = -1; } else { maxIdxs[N++] = sub2Ind(rankMax, maxShapeInfo + 1, indices); step = rankMax - 1 - maxI; } } else if(maxI == rankMax - 1) step = -1; maxI += step; } return N; } INLINEDEF _CUDA_HD void shapeOldScalar(nd4j::DataType dataType, Nd4jLong* const buffer, const char order) { buffer[0] = 2; buffer[1] = 1; buffer[2] = 1; buffer[3] = 1; buffer[4] = 1; buffer[6] = 1; buffer[7] = (int)order; nd4j::ArrayOptions::setDataType(buffer, dataType); } template <typename T1, typename T2> INLINEDEF _CUDA_H void convertT(T1 *from, T2 *to, Nd4jLong length) { for (Nd4jLong e = 0; e < length; e++) to[e] = (T2) from[e]; }; ////////////////////////////////////////////////////////////////////// INLINEDEF void calcSubArrOffsets(const Nd4jLong numOfSubArrs, const int rank, const Nd4jLong* shape, const Nd4jLong* strides, Nd4jLong* subArrOffsets) { // set offset for first sub-array, it is equal to zero always subArrOffsets[0] = 0; // choose whether to parallelize or not if(numOfSubArrs > 1024 /*Environment::getInstance()->elementwiseThreshold()*/) { #pragma omp parallel // PRAGMA_OMP_PARALLEL_ARGS(private(indexes)) { Nd4jLong* indexes = new Nd4jLong[rank]; #pragma omp for simd schedule(guided) // PRAGMA_OMP_PARALLEL_FOR for (Nd4jLong i = 1; i < numOfSubArrs; ++i) { shape::ind2subC(rank, shape, i, indexes); subArrOffsets[i] = 0; for (int j = 0; j < rank; ++j) if(shape[j] != 1) subArrOffsets[i] += indexes[j] * strides[j]; } delete []indexes; } } else { Nd4jLong rankMinusOne = rank - 1; Nd4jLong i = 1, j = rankMinusOne; Nd4jLong* idx = new Nd4jLong[rank]; Nd4jLong* currOffset = new Nd4jLong[rank]; memset(idx, 0, sizeof(Nd4jLong) * rank); memset(currOffset, 0, sizeof(Nd4jLong) * rank); // nested loops - calculation of sub-array offsets (subArrOffsets) while(j >= 0) { if(shape[j] == 1) { --j; continue; } // ignore dimensions equal to unity if(j == rankMinusOne) { // last dimension for(idx[j] = 1; idx[j] < shape[j]; ++idx[j]) subArrOffsets[i++] = subArrOffsets[i-1] + strides[j]; --j; } else if(idx[j] < shape[j] - 1) { currOffset[j] += strides[j]; subArrOffsets[i++] = j ? 
currOffset[j] + currOffset[j-1] : currOffset[j]; ++idx[j]; j = rankMinusOne; } else currOffset[j--] = idx[j] = 0; } delete []idx; delete []currOffset; } } ////////////////////////////////////////////////////////////////////// INLINEDEF void _CUDA_HD calcEws(Nd4jLong* shapeInfo, Nd4jLong len) { const int rank = shape::rank(shapeInfo); const Nd4jLong* shape = shape::shapeOf(shapeInfo); const Nd4jLong* strides = shape::stride(shapeInfo); const char order = shape::order(shapeInfo); Nd4jLong* ews = shape::ews(shapeInfo); if(len == -1) // calculate array length if it is not already set len = shape::length(shapeInfo); if(len <= 1) { // empty, scalar or unity-vector case *ews = 1; return; } int nonUnityDim(0); if(shape::isCommonVector(shapeInfo, nonUnityDim)) { *ews = strides[nonUnityDim]; return; } // check last(c)/first(f) dimension, it should be equal to 1 if((order == 'c' && shape[rank - 1] != 1 && strides[rank - 1] != 1) || (order == 'f' && shape[0] != 1 && strides[0] == 1)) { *ews = 0; return; } Nd4jLong correctStride = 1; if(order == 'c') { for (int i = rank - 2; i >= 0 ; i--) { if(shape[i + 1] == 1) continue; correctStride *= shape[i + 1]; if(correctStride != strides[i]) { *ews = 0; return; } } } else { for (int i = 1; i < rank; ++i) { if(shape[i - 1] == 1) continue; correctStride *= shape[i - 1]; if(correctStride != strides[i]) { *ews = 0; return; } } } *ews = 1; } } #endif /* SHAPE_H_ */
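
The calcEws routine above decides whether an array can be traversed with a single element-wise stride (ews). As a rough illustration, here is a standalone sketch of my own (not part of shape.h), using a simplified signature instead of the Nd4jLong shape-info buffer, of the c-order contiguity check it performs; the real function also handles f-order layouts, vectors and scalars.

/* Standalone sketch (simplified, not part of shape.h): the core of the c-order
 * contiguity test done by calcEws. Returns 1 when the strides describe a dense
 * row-major layout, so the whole array can be walked with an element-wise
 * stride of 1; unit dimensions are skipped, as in calcEws above. */
#include <stdint.h>
#include <stdio.h>

static int dense_c_order(int rank, const int64_t *shape, const int64_t *strides)
{
    int64_t expected = 1;                       /* stride a dense layout would have */
    for (int i = rank - 1; i >= 0; --i) {
        if (shape[i] == 1) continue;            /* unit dims do not constrain strides */
        if (strides[i] != expected) return 0;   /* gap or permutation -> ews = 0 */
        expected *= shape[i];
    }
    return 1;
}

int main(void)
{
    int64_t shape[3]   = {2, 3, 4};
    int64_t dense[3]   = {12, 4, 1};            /* contiguous c-order strides */
    int64_t strided[3] = {24, 8, 2};            /* every other element: not dense */
    printf("%d %d\n", dense_c_order(3, shape, dense), dense_c_order(3, shape, strided));
    return 0;
}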
openmp-ex30.c
/* Nesting named critical regions can work */
#include <stdio.h>
#include <stdlib.h>

void non_safe_one(void)
{
    int random = rand();
    printf("This function does something not thread safe, like calculating %d from rand.\n", random);
}

void non_safe_two(void)
{
    int random = rand();
    #pragma omp critical(two)
    {
        non_safe_one();
        printf("This function calculates another random number %d\n", random);
    }
}

int main(void)
{
    #pragma omp parallel
    {
        #pragma omp critical(one)
        {
            non_safe_two();
        }
    }
    return 0;
}
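
The example above works because the two critical constructs use different names (one and two), so they are protected by different locks. For contrast, here is a minimal sketch of my own (not part of the original example) of the case that does not work: if the inner construct reuses the name the calling thread already holds, the program is non-conforming OpenMP and in practice usually deadlocks.

/* Hypothetical counter-example (not in the original file): re-entering a
 * critical region with the SAME name from code already inside it is
 * non-conforming and typically deadlocks, because the thread tries to
 * re-acquire a lock it already holds. */
#include <stdio.h>

void inner(void)
{
    #pragma omp critical(shared_name)   /* same name as the enclosing region */
    {
        printf("the calling thread never gets here\n");
    }
}

int main(void)
{
    #pragma omp parallel
    {
        #pragma omp critical(shared_name)
        {
            inner();                    /* this thread already owns shared_name */
        }
    }
    return 0;
}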
fig4.55-critical-region.c
/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
 *
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 *
 * The contents of this file are subject to the terms of the BSD License("BSD")
 * (the "License"). You can obtain a copy of the License at:
 * http://www.opensparc.net/pubs/t1/licenses/BSD+_License.txt
 *
 * The BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistribution of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *   * Redistribution in binary form must reproduce the above copyright notice,
 *     this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of Sun Microsystems, Inc. or the names of contributors
 *     may be used to endorse or promote products derived from this software
 *     without specific prior written permission.
 *
 * This software is provided "AS IS," without a warranty of any kind. ALL
 * EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
 * IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR
 * NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN MICROSYSTEMS, INC. ("SUN") AND
 * ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A
 * RESULT OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES.
 * IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT
 * OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR
 * PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
 * ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF SUN HAS
 * BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
 *
 * You acknowledge that this software is not designed, licensed or intended for
 * use in the design, construction, operation or maintenance of any nuclear
 * facility.
 */
#include <stdio.h>
#include <stdlib.h>

#ifdef _OPENMP
  #include <omp.h>
  #define TRUE  1
  #define FALSE 0
#else
  #define omp_get_thread_num() 0
  #define omp_get_num_threads() 1
#endif

#define SUM_INIT 0

int main()
{
    int i, n = 25;
    int sum, TID, a[n];
    int ref = SUM_INIT + (n-1)*n/2;
    int sumLocal;

#ifdef _OPENMP
    (void) omp_set_dynamic(FALSE);
    if (omp_get_dynamic())
        {printf("Warning: dynamic adjustment of threads has been set\n");}
    (void) omp_set_num_threads(3);
#endif

    for (i=0; i<n; i++)
        a[i] = i;

    #pragma omp parallel
    {
        #pragma omp single
        printf("Number of threads is %d\n", omp_get_num_threads());
    }

    sum = SUM_INIT;
    printf("Value of sum prior to parallel region: %d\n", sum);

    #pragma omp parallel default(none) shared(n,a,sum) \
                         private(TID,sumLocal)
    {
        TID = omp_get_thread_num();
        sumLocal = 0;

        #pragma omp for
        for (i=0; i<n; i++)
            sumLocal += a[i];

        #pragma omp critical (update_sum)
        {
            sum += sumLocal;
            printf("TID=%d: sumLocal = %d sum = %d\n", TID, sumLocal, sum);
        }
    } /*-- End of parallel region --*/

    printf("Value of sum after parallel region: %d\n", sum);
    printf("Check results: sum = %d (should be %d)\n", sum, ref);

    return(0);
}
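
The figure above accumulates a per-thread partial sum by hand and merges it inside a named critical region. For comparison, here is a minimal sketch of my own (not from the book figure) of the same computation written with OpenMP's reduction clause, which lets the runtime do the combining.

/* Sketch: the same sum of 0..n-1 using reduction(+:sum) instead of an explicit
 * critical region; the runtime keeps a private copy of sum per thread and
 * combines the copies at the end of the construct. */
#include <stdio.h>

int main(void)
{
    int n = 25, sum = 0, a[25];
    for (int i = 0; i < n; i++) a[i] = i;

    #pragma omp parallel for reduction(+:sum)
    for (int i = 0; i < n; i++)
        sum += a[i];

    printf("sum = %d (should be %d)\n", sum, (n - 1) * n / 2);
    return 0;
}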
sse.h
/* SPDX-License-Identifier: MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, * modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Copyright: * 2017-2020 Evan Nemerson <evan@nemerson.com> * 2015-2017 John W. Ratcliff <jratcliffscarab@gmail.com> * 2015 Brandon Rowlett <browlett@nvidia.com> * 2015 Ken Fast <kfast@gdeb.com> */ #if !defined(SIMDE_X86_SSE_H) #define SIMDE_X86_SSE_H #include "mmx.h" #if defined(_WIN32) #include <windows.h> #endif HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ typedef union { #if defined(SIMDE_VECTOR_SUBSCRIPT) SIMDE_ALIGN_TO_16 int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; #if defined(SIMDE_HAVE_INT128_) SIMDE_ALIGN_TO_16 simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; #endif SIMDE_ALIGN_TO_16 simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; #else SIMDE_ALIGN_TO_16 int8_t i8[16]; SIMDE_ALIGN_TO_16 int16_t i16[8]; SIMDE_ALIGN_TO_16 int32_t i32[4]; SIMDE_ALIGN_TO_16 int64_t i64[2]; SIMDE_ALIGN_TO_16 uint8_t u8[16]; SIMDE_ALIGN_TO_16 uint16_t u16[8]; SIMDE_ALIGN_TO_16 uint32_t u32[4]; SIMDE_ALIGN_TO_16 uint64_t u64[2]; #if defined(SIMDE_HAVE_INT128_) SIMDE_ALIGN_TO_16 simde_int128 i128[1]; SIMDE_ALIGN_TO_16 simde_uint128 u128[1]; #endif SIMDE_ALIGN_TO_16 simde_float32 f32[4]; SIMDE_ALIGN_TO_16 int_fast32_t i32f[16 / sizeof(int_fast32_t)]; SIMDE_ALIGN_TO_16 uint_fast32_t u32f[16 / sizeof(uint_fast32_t)]; #endif SIMDE_ALIGN_TO_16 simde__m64_private m64_private[2]; SIMDE_ALIGN_TO_16 simde__m64 m64[2]; #if defined(SIMDE_X86_SSE_NATIVE) SIMDE_ALIGN_TO_16 __m128 n; #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) SIMDE_ALIGN_TO_16 int8x16_t neon_i8; SIMDE_ALIGN_TO_16 int16x8_t neon_i16; SIMDE_ALIGN_TO_16 int32x4_t neon_i32; SIMDE_ALIGN_TO_16 int64x2_t neon_i64; SIMDE_ALIGN_TO_16 uint8x16_t neon_u8; SIMDE_ALIGN_TO_16 uint16x8_t neon_u16; SIMDE_ALIGN_TO_16 uint32x4_t neon_u32; SIMDE_ALIGN_TO_16 uint64x2_t neon_u64; SIMDE_ALIGN_TO_16 float32x4_t 
neon_f32; #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) SIMDE_ALIGN_TO_16 float64x2_t neon_f64; #endif #elif defined(SIMDE_WASM_SIMD128_NATIVE) SIMDE_ALIGN_TO_16 v128_t wasm_v128; #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8; SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16; SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32; SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8; SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16; SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32; SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32; #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64; SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64; SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64; #endif #endif } simde__m128_private; #if defined(SIMDE_X86_SSE_NATIVE) typedef __m128 simde__m128; #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) typedef float32x4_t simde__m128; #elif defined(SIMDE_WASM_SIMD128_NATIVE) typedef v128_t simde__m128; #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) typedef SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128; #elif defined(SIMDE_VECTOR_SUBSCRIPT) typedef simde_float32 simde__m128 SIMDE_ALIGN_TO_16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; #else typedef simde__m128_private simde__m128; #endif #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) typedef simde__m128 __m128; #endif HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128), "simde__m128 size incorrect"); HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128_private), "simde__m128_private size incorrect"); #if defined(SIMDE_CHECK_ALIGNMENT) && defined(SIMDE_ALIGN_OF) HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128) == 16, "simde__m128 is not 16-byte aligned"); HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128_private) == 16, "simde__m128_private is not 16-byte aligned"); #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde__m128_from_private(simde__m128_private v) { simde__m128 r; simde_memcpy(&r, &v, sizeof(r)); return r; } SIMDE_FUNCTION_ATTRIBUTES simde__m128_private simde__m128_to_private(simde__m128 v) { simde__m128_private r; simde_memcpy(&r, &v, sizeof(r)); return r; } #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int8x16_t, neon, i8) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int16x8_t, neon, i16) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int32x4_t, neon, i32) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int64x2_t, neon, i64) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint8x16_t, neon, u8) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint16x8_t, neon, u16) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint32x4_t, neon, u32) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint64x2_t, neon, u64) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float32x4_t, neon, f32) #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float64x2_t, neon, f64) #endif #endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */ #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed char), altivec, i8) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed short), 
altivec, i16) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed int), altivec, i32) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), altivec, u8) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), altivec, u16) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), altivec, u32) #if defined(SIMDE_BUG_GCC_95782) SIMDE_FUNCTION_ATTRIBUTES SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128_to_altivec_f32(simde__m128 value) { simde__m128_private r_ = simde__m128_to_private(value); return r_.altivec_f32; } SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde__m128_from_altivec_f32(SIMDE_POWER_ALTIVEC_VECTOR(float) value) { simde__m128_private r_; r_.altivec_f32 = value; return simde__m128_from_private(r_); } #else SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(float), altivec, f32) #endif #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed long long), altivec, i64) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), altivec, u64) #endif #elif defined(SIMDE_WASM_SIMD128_NATIVE) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v128_t, wasm, v128); #endif /* defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) */ enum { #if defined(SIMDE_X86_SSE_NATIVE) SIMDE_MM_ROUND_NEAREST = _MM_ROUND_NEAREST, SIMDE_MM_ROUND_DOWN = _MM_ROUND_DOWN, SIMDE_MM_ROUND_UP = _MM_ROUND_UP, SIMDE_MM_ROUND_TOWARD_ZERO = _MM_ROUND_TOWARD_ZERO #else SIMDE_MM_ROUND_NEAREST = 0x0000, SIMDE_MM_ROUND_DOWN = 0x2000, SIMDE_MM_ROUND_UP = 0x4000, SIMDE_MM_ROUND_TOWARD_ZERO = 0x6000 #endif }; #if defined(_MM_FROUND_TO_NEAREST_INT) # define SIMDE_MM_FROUND_TO_NEAREST_INT _MM_FROUND_TO_NEAREST_INT # define SIMDE_MM_FROUND_TO_NEG_INF _MM_FROUND_TO_NEG_INF # define SIMDE_MM_FROUND_TO_POS_INF _MM_FROUND_TO_POS_INF # define SIMDE_MM_FROUND_TO_ZERO _MM_FROUND_TO_ZERO # define SIMDE_MM_FROUND_CUR_DIRECTION _MM_FROUND_CUR_DIRECTION # define SIMDE_MM_FROUND_RAISE_EXC _MM_FROUND_RAISE_EXC # define SIMDE_MM_FROUND_NO_EXC _MM_FROUND_NO_EXC #else # define SIMDE_MM_FROUND_TO_NEAREST_INT 0x00 # define SIMDE_MM_FROUND_TO_NEG_INF 0x01 # define SIMDE_MM_FROUND_TO_POS_INF 0x02 # define SIMDE_MM_FROUND_TO_ZERO 0x03 # define SIMDE_MM_FROUND_CUR_DIRECTION 0x04 # define SIMDE_MM_FROUND_RAISE_EXC 0x00 # define SIMDE_MM_FROUND_NO_EXC 0x08 #endif #define SIMDE_MM_FROUND_NINT \ (SIMDE_MM_FROUND_TO_NEAREST_INT | SIMDE_MM_FROUND_RAISE_EXC) #define SIMDE_MM_FROUND_FLOOR \ (SIMDE_MM_FROUND_TO_NEG_INF | SIMDE_MM_FROUND_RAISE_EXC) #define SIMDE_MM_FROUND_CEIL \ (SIMDE_MM_FROUND_TO_POS_INF | SIMDE_MM_FROUND_RAISE_EXC) #define SIMDE_MM_FROUND_TRUNC \ (SIMDE_MM_FROUND_TO_ZERO | SIMDE_MM_FROUND_RAISE_EXC) #define SIMDE_MM_FROUND_RINT \ (SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_RAISE_EXC) #define SIMDE_MM_FROUND_NEARBYINT \ (SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_NO_EXC) #if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) && !defined(_MM_FROUND_TO_NEAREST_INT) # define _MM_FROUND_TO_NEAREST_INT SIMDE_MM_FROUND_TO_NEAREST_INT # define _MM_FROUND_TO_NEG_INF SIMDE_MM_FROUND_TO_NEG_INF # define _MM_FROUND_TO_POS_INF SIMDE_MM_FROUND_TO_POS_INF # define _MM_FROUND_TO_ZERO SIMDE_MM_FROUND_TO_ZERO # define _MM_FROUND_CUR_DIRECTION SIMDE_MM_FROUND_CUR_DIRECTION # define _MM_FROUND_RAISE_EXC SIMDE_MM_FROUND_RAISE_EXC # define _MM_FROUND_NINT SIMDE_MM_FROUND_NINT # define 
_MM_FROUND_FLOOR SIMDE_MM_FROUND_FLOOR # define _MM_FROUND_CEIL SIMDE_MM_FROUND_CEIL # define _MM_FROUND_TRUNC SIMDE_MM_FROUND_TRUNC # define _MM_FROUND_RINT SIMDE_MM_FROUND_RINT # define _MM_FROUND_NEARBYINT SIMDE_MM_FROUND_NEARBYINT #endif #if defined(_MM_EXCEPT_INVALID) # define SIMDE_MM_EXCEPT_INVALID _MM_EXCEPT_INVALID #else # define SIMDE_MM_EXCEPT_INVALID (0x0001) #endif #if defined(_MM_EXCEPT_DENORM) # define SIMDE_MM_EXCEPT_DENORM _MM_EXCEPT_DENORM #else # define SIMDE_MM_EXCEPT_DENORM (0x0002) #endif #if defined(_MM_EXCEPT_DIV_ZERO) # define SIMDE_MM_EXCEPT_DIV_ZERO _MM_EXCEPT_DIV_ZERO #else # define SIMDE_MM_EXCEPT_DIV_ZERO (0x0004) #endif #if defined(_MM_EXCEPT_OVERFLOW) # define SIMDE_MM_EXCEPT_OVERFLOW _MM_EXCEPT_OVERFLOW #else # define SIMDE_MM_EXCEPT_OVERFLOW (0x0008) #endif #if defined(_MM_EXCEPT_UNDERFLOW) # define SIMDE_MM_EXCEPT_UNDERFLOW _MM_EXCEPT_UNDERFLOW #else # define SIMDE_MM_EXCEPT_UNDERFLOW (0x0010) #endif #if defined(_MM_EXCEPT_INEXACT) # define SIMDE_MM_EXCEPT_INEXACT _MM_EXCEPT_INEXACT #else # define SIMDE_MM_EXCEPT_INEXACT (0x0020) #endif #if defined(_MM_EXCEPT_MASK) # define SIMDE_MM_EXCEPT_MASK _MM_EXCEPT_MASK #else # define SIMDE_MM_EXCEPT_MASK \ (SIMDE_MM_EXCEPT_INVALID | SIMDE_MM_EXCEPT_DENORM | \ SIMDE_MM_EXCEPT_DIV_ZERO | SIMDE_MM_EXCEPT_OVERFLOW | \ SIMDE_MM_EXCEPT_UNDERFLOW | SIMDE_MM_EXCEPT_INEXACT) #endif #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _MM_EXCEPT_INVALID SIMDE_MM_EXCEPT_INVALID #define _MM_EXCEPT_DENORM SIMDE_MM_EXCEPT_DENORM #define _MM_EXCEPT_DIV_ZERO SIMDE_MM_EXCEPT_DIV_ZERO #define _MM_EXCEPT_OVERFLOW SIMDE_MM_EXCEPT_OVERFLOW #define _MM_EXCEPT_UNDERFLOW SIMDE_MM_EXCEPT_UNDERFLOW #define _MM_EXCEPT_INEXACT SIMDE_MM_EXCEPT_INEXACT #define _MM_EXCEPT_MASK SIMDE_MM_EXCEPT_MASK #endif #if defined(_MM_MASK_INVALID) # define SIMDE_MM_MASK_INVALID _MM_MASK_INVALID #else # define SIMDE_MM_MASK_INVALID (0x0080) #endif #if defined(_MM_MASK_DENORM) # define SIMDE_MM_MASK_DENORM _MM_MASK_DENORM #else # define SIMDE_MM_MASK_DENORM (0x0100) #endif #if defined(_MM_MASK_DIV_ZERO) # define SIMDE_MM_MASK_DIV_ZERO _MM_MASK_DIV_ZERO #else # define SIMDE_MM_MASK_DIV_ZERO (0x0200) #endif #if defined(_MM_MASK_OVERFLOW) # define SIMDE_MM_MASK_OVERFLOW _MM_MASK_OVERFLOW #else # define SIMDE_MM_MASK_OVERFLOW (0x0400) #endif #if defined(_MM_MASK_UNDERFLOW) # define SIMDE_MM_MASK_UNDERFLOW _MM_MASK_UNDERFLOW #else # define SIMDE_MM_MASK_UNDERFLOW (0x0800) #endif #if defined(_MM_MASK_INEXACT) # define SIMDE_MM_MASK_INEXACT _MM_MASK_INEXACT #else # define SIMDE_MM_MASK_INEXACT (0x1000) #endif #if defined(_MM_MASK_MASK) # define SIMDE_MM_MASK_MASK _MM_MASK_MASK #else # define SIMDE_MM_MASK_MASK \ (SIMDE_MM_MASK_INVALID | SIMDE_MM_MASK_DENORM | \ SIMDE_MM_MASK_DIV_ZERO | SIMDE_MM_MASK_OVERFLOW | \ SIMDE_MM_MASK_UNDERFLOW | SIMDE_MM_MASK_INEXACT) #endif #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _MM_MASK_INVALID SIMDE_MM_MASK_INVALID #define _MM_MASK_DENORM SIMDE_MM_MASK_DENORM #define _MM_MASK_DIV_ZERO SIMDE_MM_MASK_DIV_ZERO #define _MM_MASK_OVERFLOW SIMDE_MM_MASK_OVERFLOW #define _MM_MASK_UNDERFLOW SIMDE_MM_MASK_UNDERFLOW #define _MM_MASK_INEXACT SIMDE_MM_MASK_INEXACT #define _MM_MASK_MASK SIMDE_MM_MASK_MASK #endif #if defined(_MM_FLUSH_ZERO_MASK) # define SIMDE_MM_FLUSH_ZERO_MASK _MM_FLUSH_ZERO_MASK #else # define SIMDE_MM_FLUSH_ZERO_MASK (0x8000) #endif #if defined(_MM_FLUSH_ZERO_ON) # define SIMDE_MM_FLUSH_ZERO_ON _MM_FLUSH_ZERO_ON #else # define SIMDE_MM_FLUSH_ZERO_ON (0x8000) #endif #if defined(_MM_FLUSH_ZERO_OFF) # define 
SIMDE_MM_FLUSH_ZERO_OFF _MM_FLUSH_ZERO_OFF #else # define SIMDE_MM_FLUSH_ZERO_OFF (0x0000) #endif #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _MM_FLUSH_ZERO_MASK SIMDE_MM_FLUSH_ZERO_MASK #define _MM_FLUSH_ZERO_ON SIMDE_MM_FLUSH_ZERO_ON #define _MM_FLUSH_ZERO_OFF SIMDE_MM_FLUSH_ZERO_OFF #endif SIMDE_FUNCTION_ATTRIBUTES unsigned int SIMDE_MM_GET_ROUNDING_MODE(void) { #if defined(SIMDE_X86_SSE_NATIVE) return _MM_GET_ROUNDING_MODE(); #elif defined(SIMDE_HAVE_FENV_H) unsigned int vfe_mode; switch (fegetround()) { #if defined(FE_TONEAREST) case FE_TONEAREST: vfe_mode = SIMDE_MM_ROUND_NEAREST; break; #endif #if defined(FE_TOWARDZERO) case FE_TOWARDZERO: vfe_mode = SIMDE_MM_ROUND_DOWN; break; #endif #if defined(FE_UPWARD) case FE_UPWARD: vfe_mode = SIMDE_MM_ROUND_UP; break; #endif #if defined(FE_DOWNWARD) case FE_DOWNWARD: vfe_mode = SIMDE_MM_ROUND_TOWARD_ZERO; break; #endif default: vfe_mode = SIMDE_MM_ROUND_NEAREST; break; } return vfe_mode; #else return SIMDE_MM_ROUND_NEAREST; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _MM_GET_ROUNDING_MODE() SIMDE_MM_GET_ROUNDING_MODE() #endif SIMDE_FUNCTION_ATTRIBUTES void SIMDE_MM_SET_ROUNDING_MODE(unsigned int a) { #if defined(SIMDE_X86_SSE_NATIVE) _MM_SET_ROUNDING_MODE(a); #elif defined(SIMDE_HAVE_FENV_H) int fe_mode = FE_TONEAREST; switch (a) { #if defined(FE_TONEAREST) case SIMDE_MM_ROUND_NEAREST: fe_mode = FE_TONEAREST; break; #endif #if defined(FE_TOWARDZERO) case SIMDE_MM_ROUND_TOWARD_ZERO: fe_mode = FE_TOWARDZERO; break; #endif #if defined(FE_DOWNWARD) case SIMDE_MM_ROUND_DOWN: fe_mode = FE_DOWNWARD; break; #endif #if defined(FE_UPWARD) case SIMDE_MM_ROUND_UP: fe_mode = FE_UPWARD; break; #endif default: return; } fesetround(fe_mode); #else (void) a; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _MM_SET_ROUNDING_MODE(a) SIMDE_MM_SET_ROUNDING_MODE(a) #endif SIMDE_FUNCTION_ATTRIBUTES uint32_t SIMDE_MM_GET_FLUSH_ZERO_MODE (void) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_getcsr() & _MM_FLUSH_ZERO_MASK; #else return SIMDE_MM_FLUSH_ZERO_OFF; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _MM_SET_FLUSH_ZERO_MODE(a) SIMDE_MM_SET_FLUSH_ZERO_MODE(a) #endif SIMDE_FUNCTION_ATTRIBUTES void SIMDE_MM_SET_FLUSH_ZERO_MODE (uint32_t a) { #if defined(SIMDE_X86_SSE_NATIVE) _MM_SET_FLUSH_ZERO_MODE(a); #else (void) a; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _MM_SET_FLUSH_ZERO_MODE(a) SIMDE_MM_SET_FLUSH_ZERO_MODE(a) #endif SIMDE_FUNCTION_ATTRIBUTES uint32_t simde_mm_getcsr (void) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_getcsr(); #else return SIMDE_MM_GET_ROUNDING_MODE(); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_getcsr() simde_mm_getcsr() #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_setcsr (uint32_t a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_setcsr(a); #else SIMDE_MM_SET_ROUNDING_MODE(HEDLEY_STATIC_CAST(unsigned int, a)); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_setcsr(a) simde_mm_setcsr(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_round_ps (simde__m128 a, int rounding, int lax_rounding) SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15) SIMDE_REQUIRE_CONSTANT_RANGE(lax_rounding, 0, 1) { simde__m128_private r_, a_ = simde__m128_to_private(a); (void) lax_rounding; /* For architectures which lack a current direction SIMD instruction. 
* * Note that NEON actually has a current rounding mode instruction, * but in ARMv8+ the rounding mode is ignored and nearest is always * used, so we treat ARMv7 as having a rounding mode but ARMv8 as * not. */ #if \ defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || \ defined(SIMDE_ARM_NEON_A32V8) if ((rounding & 7) == SIMDE_MM_FROUND_CUR_DIRECTION) rounding = HEDLEY_STATIC_CAST(int, SIMDE_MM_GET_ROUNDING_MODE()) << 13; #endif switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) { case SIMDE_MM_FROUND_CUR_DIRECTION: #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32)); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399) r_.neon_f32 = vrndiq_f32(a_.neon_f32); #elif defined(simde_math_nearbyintf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_nearbyintf(a_.f32[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); #endif break; case SIMDE_MM_FROUND_TO_NEAREST_INT: #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_rint(a_.altivec_f32)); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) r_.neon_f32 = vrndnq_f32(a_.neon_f32); #elif defined(simde_math_roundevenf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_roundevenf(a_.f32[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); #endif break; case SIMDE_MM_FROUND_TO_NEG_INF: #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_floor(a_.altivec_f32)); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) r_.neon_f32 = vrndmq_f32(a_.neon_f32); #elif defined(simde_math_floorf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_floorf(a_.f32[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); #endif break; case SIMDE_MM_FROUND_TO_POS_INF: #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_ceil(a_.altivec_f32)); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) r_.neon_f32 = vrndpq_f32(a_.neon_f32); #elif defined(simde_math_ceilf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_ceilf(a_.f32[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); #endif break; case SIMDE_MM_FROUND_TO_ZERO: #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_trunc(a_.altivec_f32)); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) r_.neon_f32 = vrndq_f32(a_.neon_f32); #elif defined(simde_math_truncf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_truncf(a_.f32[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); #endif break; default: HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); } return simde__m128_from_private(r_); } #if defined(SIMDE_X86_SSE4_1_NATIVE) #define simde_mm_round_ps(a, rounding) _mm_round_ps((a), (rounding)) #else #define simde_mm_round_ps(a, rounding) simde_x_mm_round_ps((a), (rounding), 0) #endif #if 
defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) #define _mm_round_ps(a, rounding) simde_mm_round_ps((a), (rounding)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_set_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_set_ps(e3, e2, e1, e0); #else simde__m128_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) SIMDE_ALIGN_TO_16 simde_float32 data[4] = { e0, e1, e2, e3 }; r_.neon_f32 = vld1q_f32(data); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_make(e0, e1, e2, e3); #else r_.f32[0] = e0; r_.f32[1] = e1; r_.f32[2] = e2; r_.f32[3] = e3; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_set_ps(e3, e2, e1, e0) simde_mm_set_ps(e3, e2, e1, e0) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_set_ps1 (simde_float32 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_set_ps1(a); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vdupq_n_f32(a); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) (void) a; return vec_splats(a); #else return simde_mm_set_ps(a, a, a, a); #endif } #define simde_mm_set1_ps(a) simde_mm_set_ps1(a) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_set_ps1(a) simde_mm_set_ps1(a) # define _mm_set1_ps(a) simde_mm_set1_ps(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_move_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_move_ss(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(b_.neon_f32, 0), a_.neon_f32, 0); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) m = { 16, 17, 18, 19, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; r_.altivec_f32 = vec_perm(a_.altivec_f32, b_.altivec_f32, m); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v8x16_shuffle(b_.wasm_v128, a_.wasm_v128, 0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 4, 1, 2, 3); #else r_.f32[0] = b_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_move_ss(a, b) simde_mm_move_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_add_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_add_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vaddq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_add(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_add(a_.altivec_f32, b_.altivec_f32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.f32 = a_.f32 + b_.f32; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = a_.f32[i] + b_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_add_ps(a, b) simde_mm_add_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_add_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_add_ss(a, b); #elif 
(SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_add_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32_t b0 = vgetq_lane_f32(b_.neon_f32, 0); float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0); // the upper values in the result must be the remnants of <a>. r_.neon_f32 = vaddq_f32(a_.neon_f32, value); #else r_.f32[0] = a_.f32[0] + b_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_add_ss(a, b) simde_mm_add_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_and_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_and_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vandq_s32(a_.neon_i32, b_.neon_i32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_and(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = a_.i32 & b_.i32; #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_and(a_.altivec_f32, b_.altivec_f32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { r_.i32[i] = a_.i32[i] & b_.i32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_and_ps(a, b) simde_mm_and_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_andnot_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_andnot_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vbicq_s32(b_.neon_i32, a_.neon_i32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_andnot(b_.wasm_v128, a_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) r_.altivec_f32 = vec_andc(b_.altivec_f32, a_.altivec_f32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = ~a_.i32 & b_.i32; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { r_.i32[i] = ~(a_.i32[i]) & b_.i32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_andnot_ps(a, b) simde_mm_andnot_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_xor_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_xor_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = veorq_s32(a_.neon_i32, b_.neon_i32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_xor(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_i32 = vec_xor(a_.altivec_i32, b_.altivec_i32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32f = a_.i32f ^ b_.i32f; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { r_.u32[i] = a_.u32[i] ^ b_.u32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_xor_ps(a, b) simde_mm_xor_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_or_ps (simde__m128 a, simde__m128 b) { #if 
defined(SIMDE_X86_SSE_NATIVE) return _mm_or_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vorrq_s32(a_.neon_i32, b_.neon_i32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_or(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_i32 = vec_or(a_.altivec_i32, b_.altivec_i32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32f = a_.i32f | b_.i32f; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { r_.u32[i] = a_.u32[i] | b_.u32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_or_ps(a, b) simde_mm_or_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_not_ps(simde__m128 a) { #if defined(SIMDE_X86_AVX512VL_NATIVE) __m128i ai = _mm_castps_si128(a); return _mm_castsi128_ps(_mm_ternarylogic_epi32(ai, ai, ai, 0x55)); #elif defined(SIMDE_X86_SSE2_NATIVE) /* Note: we use ints instead of floats because we don't want cmpeq * to return false for (NaN, NaN) */ __m128i ai = _mm_castps_si128(a); return _mm_castsi128_ps(_mm_andnot_si128(ai, _mm_cmpeq_epi32(ai, ai))); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vmvnq_s32(a_.neon_i32); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_not(a_.wasm_v128); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = ~a_.i32; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { r_.i32[i] = ~(a_.i32[i]); } #endif return simde__m128_from_private(r_); #endif } SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_select_ps(simde__m128 a, simde__m128 b, simde__m128 mask) { /* This function is for when you want to blend two elements together * according to a mask. It is similar to _mm_blendv_ps, except that * it is undefined whether the blend is based on the highest bit in * each lane (like blendv) or just bitwise operations. This allows * us to implement the function efficiently everywhere. * * Basically, you promise that all the lanes in mask are either 0 or * ~0. 
*/ #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_blendv_ps(a, b, mask); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b), mask_ = simde__m128_to_private(mask); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vbslq_s32(mask_.neon_u32, b_.neon_i32, a_.neon_i32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_bitselect(b_.wasm_v128, a_.wasm_v128, mask_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) r_.altivec_i32 = vec_sel(a_.altivec_i32, b_.altivec_i32, mask_.altivec_u32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = a_.i32 ^ ((a_.i32 ^ b_.i32) & mask_.i32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { r_.i32[i] = a_.i32[i] ^ ((a_.i32[i] ^ b_.i32[i]) & mask_.i32[i]); } #endif return simde__m128_from_private(r_); #endif } SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_avg_pu16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_avg_pu16(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u16 = vrhadd_u16(b_.neon_u16, a_.neon_u16); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_) uint32_t wa SIMDE_VECTOR(16); uint32_t wb SIMDE_VECTOR(16); uint32_t wr SIMDE_VECTOR(16); SIMDE_CONVERT_VECTOR_(wa, a_.u16); SIMDE_CONVERT_VECTOR_(wb, b_.u16); wr = (wa + wb + 1) >> 1; SIMDE_CONVERT_VECTOR_(r_.u16, wr); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { r_.u16[i] = (a_.u16[i] + b_.u16[i] + 1) >> 1; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pavgw(a, b) simde_mm_avg_pu16(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_avg_pu16(a, b) simde_mm_avg_pu16(a, b) # define _m_pavgw(a, b) simde_mm_avg_pu16(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_avg_pu8 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_avg_pu8(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u8 = vrhadd_u8(b_.neon_u8, a_.neon_u8); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_) uint16_t wa SIMDE_VECTOR(16); uint16_t wb SIMDE_VECTOR(16); uint16_t wr SIMDE_VECTOR(16); SIMDE_CONVERT_VECTOR_(wa, a_.u8); SIMDE_CONVERT_VECTOR_(wb, b_.u8); wr = (wa + wb + 1) >> 1; SIMDE_CONVERT_VECTOR_(r_.u8, wr); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { r_.u8[i] = (a_.u8[i] + b_.u8[i] + 1) >> 1; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pavgb(a, b) simde_mm_avg_pu8(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_avg_pu8(a, b) simde_mm_avg_pu8(a, b) # define _m_pavgb(a, b) simde_mm_avg_pu8(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_abs_ps(simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) simde_float32 mask_; uint32_t u32_ = UINT32_C(0x7FFFFFFF); simde_memcpy(&mask_, &u32_, sizeof(u32_)); return _mm_and_ps(_mm_set1_ps(mask_), a); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vabsq_f32(a_.neon_f32); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || 
defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) r_.altivec_f32 = vec_abs(a_.altivec_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_abs(a_.wasm_v128); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_fabsf(a_.f32[i]); } #endif return simde__m128_from_private(r_); #endif } SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpeq_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpeq_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vceqq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_eq(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), a_.f32 == b_.f32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpeq_ps(a, b) simde_mm_cmpeq_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpeq_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpeq_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmpeq_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.u32[0] = (a_.f32[0] == b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpeq_ss(a, b) simde_mm_cmpeq_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpge_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpge_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vcgeq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_ge(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpge(a_.altivec_f32, b_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpge_ps(a, b) simde_mm_cmpge_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpge_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI) return _mm_cmpge_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmpge_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.u32[0] = (a_.f32[0] >= b_.f32[0]) ? 
~UINT32_C(0) : UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpge_ss(a, b) simde_mm_cmpge_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpgt_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpgt_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vcgtq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpgt(a_.altivec_f32, b_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpgt_ps(a, b) simde_mm_cmpgt_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpgt_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI) return _mm_cmpgt_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmpgt_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.u32[0] = (a_.f32[0] > b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpgt_ss(a, b) simde_mm_cmpgt_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmple_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmple_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vcleq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_le(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmple(a_.altivec_f32, b_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmple_ps(a, b) simde_mm_cmple_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmple_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmple_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmple_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.u32[0] = (a_.f32[0] <= b_.f32[0]) ? 
~UINT32_C(0) : UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmple_ss(a, b) simde_mm_cmple_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmplt_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmplt_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vcltq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmplt(a_.altivec_f32, b_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmplt_ps(a, b) simde_mm_cmplt_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmplt_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmplt_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmplt_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.u32[0] = (a_.f32[0] < b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmplt_ss(a, b) simde_mm_cmplt_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpneq_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpneq_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_ne(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) && SIMDE_ARCH_POWER_CHECK(900) && !defined(HEDLEY_IBM_VERSION) /* vec_cmpne(SIMDE_POWER_ALTIVEC_VECTOR(float), SIMDE_POWER_ALTIVEC_VECTOR(float)) is missing from XL C/C++ v16.1.1, though the documentation (table 89 on page 432 of the IBM XL C/C++ for Linux Compiler Reference, Version 16.1.1) shows that it should be present. Both GCC and clang support it. */ r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpne(a_.altivec_f32, b_.altivec_f32)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32)); r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_nor(r_.altivec_f32, r_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? 
~UINT32_C(0) : UINT32_C(0); } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpneq_ps(a, b) simde_mm_cmpneq_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpneq_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpneq_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmpneq_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.u32[0] = (a_.f32[0] != b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpneq_ss(a, b) simde_mm_cmpneq_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpnge_ps (simde__m128 a, simde__m128 b) { return simde_mm_cmplt_ps(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpnge_ps(a, b) simde_mm_cmpnge_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpnge_ss (simde__m128 a, simde__m128 b) { return simde_mm_cmplt_ss(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpnge_ss(a, b) simde_mm_cmpnge_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpngt_ps (simde__m128 a, simde__m128 b) { return simde_mm_cmple_ps(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpngt_ps(a, b) simde_mm_cmpngt_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpngt_ss (simde__m128 a, simde__m128 b) { return simde_mm_cmple_ss(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpngt_ss(a, b) simde_mm_cmpngt_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpnle_ps (simde__m128 a, simde__m128 b) { return simde_mm_cmpgt_ps(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpnle_ps(a, b) simde_mm_cmpnle_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpnle_ss (simde__m128 a, simde__m128 b) { return simde_mm_cmpgt_ss(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpnle_ss(a, b) simde_mm_cmpnle_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpnlt_ps (simde__m128 a, simde__m128 b) { return simde_mm_cmpge_ps(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpnlt_ps(a, b) simde_mm_cmpnlt_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpnlt_ss (simde__m128 a, simde__m128 b) { return simde_mm_cmpge_ss(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpnlt_ss(a, b) simde_mm_cmpnlt_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpord_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpord_ps(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_v128_and(wasm_f32x4_eq(a, a), wasm_f32x4_eq(b, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) /* Note: NEON does not have ordered compare builtin Need to compare a eq a and b eq b to check for NaN Do AND of results to get final */ uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32); r_.neon_u32 = vandq_u32(ceqaa, ceqbb); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_and(wasm_f32x4_eq(a_.wasm_v128, 
a_.wasm_v128), wasm_f32x4_eq(b_.wasm_v128, b_.wasm_v128)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32))); #elif defined(simde_math_isnanf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? UINT32_C(0) : ~UINT32_C(0); } #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpord_ps(a, b) simde_mm_cmpord_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpunord_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpunord_ps(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_v128_or(wasm_f32x4_ne(a, a), wasm_f32x4_ne(b, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32); r_.neon_u32 = vmvnq_u32(vandq_u32(ceqaa, ceqbb)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_or(wasm_f32x4_ne(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_ne(b_.wasm_v128, b_.wasm_v128)); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_nand(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32))); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32))); r_.altivec_f32 = vec_nor(r_.altivec_f32, r_.altivec_f32); #elif defined(simde_math_isnanf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0); } #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpunord_ps(a, b) simde_mm_cmpunord_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpunord_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI) return _mm_cmpunord_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmpunord_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(simde_math_isnanf) r_.u32[0] = (simde_math_isnanf(a_.f32[0]) || simde_math_isnanf(b_.f32[0])) ? 
~UINT32_C(0) : UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpunord_ss(a, b) simde_mm_cmpunord_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_comieq_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_comieq_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan)); uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32); return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0); #else return a_.f32[0] == b_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_comieq_ss(a, b) simde_mm_comieq_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_comige_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_comige_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan); uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32); return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0); #else return a_.f32[0] >= b_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_comige_ss(a, b) simde_mm_comige_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_comigt_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_comigt_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan); uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32); return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0); #else return a_.f32[0] > b_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_comigt_ss(a, b) simde_mm_comigt_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_comile_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_comile_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan)); uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32); return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0); #else return a_.f32[0] <= b_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_comile_ss(a, b) simde_mm_comile_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_comilt_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_comilt_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = 
simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan)); uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32); return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0); #else return a_.f32[0] < b_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_comilt_ss(a, b) simde_mm_comilt_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_comineq_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_comineq_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan); uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32)); return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0); #else return a_.f32[0] != b_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_comineq_ss(a, b) simde_mm_comineq_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_copysign_ps(simde__m128 dest, simde__m128 src) { simde__m128_private r_, dest_ = simde__m128_to_private(dest), src_ = simde__m128_to_private(src); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) const uint32x4_t sign_pos = vreinterpretq_u32_f32(vdupq_n_f32(-SIMDE_FLOAT32_C(0.0))); r_.neon_u32 = vbslq_u32(sign_pos, src_.neon_u32, dest_.neon_u32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) const v128_t sign_pos = wasm_f32x4_splat(-0.0f); r_.wasm_v128 = wasm_v128_bitselect(src_.wasm_v128, dest_.wasm_v128, sign_pos); #elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) #if !defined(HEDLEY_IBM_VERSION) r_.altivec_f32 = vec_cpsgn(dest_.altivec_f32, src_.altivec_f32); #else r_.altivec_f32 = vec_cpsgn(src_.altivec_f32, dest_.altivec_f32); #endif #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) sign_pos = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_splats(-0.0f)); r_.altivec_f32 = vec_sel(dest_.altivec_f32, src_.altivec_f32, sign_pos); #elif defined(SIMDE_IEEE754_STORAGE) (void) src_; (void) dest_; simde__m128 sign_pos = simde_mm_set1_ps(-0.0f); r_ = simde__m128_to_private(simde_mm_xor_ps(dest, simde_mm_and_ps(simde_mm_xor_ps(dest, src), sign_pos))); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_copysignf(dest_.f32[i], src_.f32[i]); } #endif return simde__m128_from_private(r_); } SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_xorsign_ps(simde__m128 dest, simde__m128 src) { return simde_mm_xor_ps(simde_mm_and_ps(simde_mm_set1_ps(-0.0f), src), dest); } SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvt_pi2ps (simde__m128 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvt_pi2ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a); simde__m64_private b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32)); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32); r_.m64_private[1] = a_.m64_private[1]; #else 
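/* Portable fallback: convert the two signed 32-bit integers from b into the low two lanes and pass the upper two lanes of a through unchanged. */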
r_.f32[0] = (simde_float32) b_.i32[0]; r_.f32[1] = (simde_float32) b_.i32[1]; r_.i32[2] = a_.i32[2]; r_.i32[3] = a_.i32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvt_pi2ps(a, b) simde_mm_cvt_pi2ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_cvt_ps2pi (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvt_ps2pi(a); #else simde__m64_private r_; simde__m128_private a_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION)); r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32)); #elif defined(SIMDE_CONVERT_VECTOR_) && SIMDE_NATURAL_VECTOR_SIZE_GE(128) a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION)); SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32); #else a_ = simde__m128_to_private(a); SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, simde_math_nearbyintf(a_.f32[i])); } #endif return simde__m64_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvt_ps2pi(a) simde_mm_cvt_ps2pi((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvt_si2ss (simde__m128 a, int32_t b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cvt_si2ss(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float, b), a_.neon_f32, 0); #else r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b); r_.i32[1] = a_.i32[1]; r_.i32[2] = a_.i32[2]; r_.i32[3] = a_.i32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvt_si2ss(a, b) simde_mm_cvt_si2ss((a), b) #endif SIMDE_FUNCTION_ATTRIBUTES int32_t simde_mm_cvt_ss2si (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cvt_ss2si(a); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_BUG_GCC_95399) return vgetq_lane_s32(vcvtnq_s32_f32(simde__m128_to_neon_f32(a)), 0); #else simde__m128_private a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION)); #if !defined(SIMDE_FAST_CONVERSION_RANGE) return ((a_.f32[0] > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (a_.f32[0] < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ? 
SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]) : INT32_MIN; #else return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]); #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvt_ss2si(a) simde_mm_cvt_ss2si((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpi16_ps (simde__m64 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpi16_ps(a); #else simde__m128_private r_; simde__m64_private a_ = simde__m64_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(a_.neon_i16)); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.f32, a_.i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { simde_float32 v = a_.i16[i]; r_.f32[i] = v; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtpi16_ps(a) simde_mm_cvtpi16_ps(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpi32_ps (simde__m128 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpi32_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a); simde__m64_private b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32)); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32); r_.m64_private[1] = a_.m64_private[1]; #else r_.f32[0] = (simde_float32) b_.i32[0]; r_.f32[1] = (simde_float32) b_.i32[1]; r_.i32[2] = a_.i32[2]; r_.i32[3] = a_.i32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtpi32_ps(a, b) simde_mm_cvtpi32_ps((a), b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpi32x2_ps (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpi32x2_ps(a, b); #else simde__m128_private r_; simde__m64_private a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcvtq_f32_s32(vcombine_s32(a_.neon_i32, b_.neon_i32)); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, a_.i32); SIMDE_CONVERT_VECTOR_(r_.m64_private[1].f32, b_.i32); #else r_.f32[0] = (simde_float32) a_.i32[0]; r_.f32[1] = (simde_float32) a_.i32[1]; r_.f32[2] = (simde_float32) b_.i32[0]; r_.f32[3] = (simde_float32) b_.i32[1]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtpi32x2_ps(a, b) simde_mm_cvtpi32x2_ps(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpi8_ps (simde__m64 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpi8_ps(a); #else simde__m128_private r_; simde__m64_private a_ = simde__m64_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(a_.neon_i8)))); #else r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[0]); r_.f32[1] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[1]); r_.f32[2] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[2]); r_.f32[3] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[3]); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtpi8_ps(a) simde_mm_cvtpi8_ps(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_cvtps_pi16 (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && 
defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtps_pi16(a); #else simde__m64_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399) r_.neon_i16 = vmovn_s32(vcvtq_s32_f32(vrndiq_f32(a_.neon_f32))); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { r_.i16[i] = SIMDE_CONVERT_FTOI(int16_t, simde_math_roundf(a_.f32[i])); } #endif return simde__m64_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtps_pi16(a) simde_mm_cvtps_pi16((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_cvtps_pi32 (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtps_pi32(a); #else simde__m64_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_BUG_GCC_95399) r_.neon_i32 = vcvt_s32_f32(vget_low_f32(vrndiq_f32(a_.neon_f32))); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { simde_float32 v = simde_math_roundf(a_.f32[i]); #if !defined(SIMDE_FAST_CONVERSION_RANGE) r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ? SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN; #else r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v); #endif } #endif return simde__m64_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtps_pi32(a) simde_mm_cvtps_pi32((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_cvtps_pi8 (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtps_pi8(a); #else simde__m64_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95471) /* Clamp the input to [INT8_MIN, INT8_MAX], round, convert to i32, narrow to * i16, combine with an all-zero vector of i16 (which will become the upper * half), narrow to i8. 
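 * vrndnq_f32 rounds to nearest with ties to even, matching the x86 default round-to-nearest rounding mode that _mm_cvtps_pi8 would use.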
*/ float32x4_t max = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MAX)); float32x4_t min = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MIN)); float32x4_t values = vrndnq_f32(vmaxq_f32(vminq_f32(max, a_.neon_f32), min)); r_.neon_i8 = vmovn_s16(vcombine_s16(vmovn_s32(vcvtq_s32_f32(values)), vdup_n_s16(0))); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) { if (a_.f32[i] > HEDLEY_STATIC_CAST(simde_float32, INT8_MAX)) r_.i8[i] = INT8_MAX; else if (a_.f32[i] < HEDLEY_STATIC_CAST(simde_float32, INT8_MIN)) r_.i8[i] = INT8_MIN; else r_.i8[i] = SIMDE_CONVERT_FTOI(int8_t, simde_math_roundf(a_.f32[i])); } /* Note: the upper half is undefined */ #endif return simde__m64_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtps_pi8(a) simde_mm_cvtps_pi8((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpu16_ps (simde__m64 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpu16_ps(a); #else simde__m128_private r_; simde__m64_private a_ = simde__m64_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(a_.neon_u16)); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.f32, a_.u16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = (simde_float32) a_.u16[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtpu16_ps(a) simde_mm_cvtpu16_ps(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpu8_ps (simde__m64 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpu8_ps(a); #else simde__m128_private r_; simde__m64_private a_ = simde__m64_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(a_.neon_u8)))); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = HEDLEY_STATIC_CAST(simde_float32, a_.u8[i]); } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtpu8_ps(a) simde_mm_cvtpu8_ps(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtsi32_ss (simde__m128 a, int32_t b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cvtsi32_ss(a, b); #else simde__m128_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0); #else r_ = a_; r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtsi32_ss(a, b) simde_mm_cvtsi32_ss((a), b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtsi64_ss (simde__m128 a, int64_t b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) #if !defined(__PGI) return _mm_cvtsi64_ss(a, b); #else return _mm_cvtsi64x_ss(a, b); #endif #else simde__m128_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0); #else r_ = a_; r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64)) # define _mm_cvtsi64_ss(a, b) 
simde_mm_cvtsi64_ss((a), b) #endif SIMDE_FUNCTION_ATTRIBUTES simde_float32 simde_mm_cvtss_f32 (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cvtss_f32(a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vgetq_lane_f32(a_.neon_f32, 0); #else return a_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtss_f32(a) simde_mm_cvtss_f32((a)) #endif SIMDE_FUNCTION_ATTRIBUTES int32_t simde_mm_cvtss_si32 (simde__m128 a) { return simde_mm_cvt_ss2si(a); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtss_si32(a) simde_mm_cvtss_si32((a)) #endif SIMDE_FUNCTION_ATTRIBUTES int64_t simde_mm_cvtss_si64 (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) #if !defined(__PGI) return _mm_cvtss_si64(a); #else return _mm_cvtss_si64x(a); #endif #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(vgetq_lane_f32(a_.neon_f32, 0))); #else return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(a_.f32[0])); #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64)) # define _mm_cvtss_si64(a) simde_mm_cvtss_si64((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_cvtt_ps2pi (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtt_ps2pi(a); #else simde__m64_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { simde_float32 v = a_.f32[i]; #if !defined(SIMDE_FAST_CONVERSION_RANGE) r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ? SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN; #else r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v); #endif } #endif return simde__m64_from_private(r_); #endif } #define simde_mm_cvttps_pi32(a) simde_mm_cvtt_ps2pi(a) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtt_ps2pi(a) simde_mm_cvtt_ps2pi((a)) # define _mm_cvttps_pi32(a) simde_mm_cvttps_pi32((a)) #endif SIMDE_FUNCTION_ATTRIBUTES int32_t simde_mm_cvtt_ss2si (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cvtt_ss2si(a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) return SIMDE_CONVERT_FTOI(int32_t, vgetq_lane_f32(a_.neon_f32, 0)); #else simde_float32 v = a_.f32[0]; #if !defined(SIMDE_FAST_CONVERSION_RANGE) return ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ? 
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN; #else return SIMDE_CONVERT_FTOI(int32_t, v); #endif #endif #endif } #define simde_mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a)) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtt_ss2si(a) simde_mm_cvtt_ss2si((a)) # define _mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a)) #endif SIMDE_FUNCTION_ATTRIBUTES int64_t simde_mm_cvttss_si64 (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) && !defined(_MSC_VER) #if defined(__PGI) return _mm_cvttss_si64x(a); #else return _mm_cvttss_si64(a); #endif #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return SIMDE_CONVERT_FTOI(int64_t, vgetq_lane_f32(a_.neon_f32, 0)); #else return SIMDE_CONVERT_FTOI(int64_t, a_.f32[0]); #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64)) # define _mm_cvttss_si64(a) simde_mm_cvttss_si64((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpord_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpord_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmpord_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(simde_math_isnanf) r_.u32[0] = (simde_math_isnanf(simde_mm_cvtss_f32(a)) || simde_math_isnanf(simde_mm_cvtss_f32(b))) ? UINT32_C(0) : ~UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpord_ss(a, b) simde_mm_cmpord_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_div_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_div_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f32 = vdivq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x4_t recip0 = vrecpeq_f32(b_.neon_f32); float32x4_t recip1 = vmulq_f32(recip0, vrecpsq_f32(recip0, b_.neon_f32)); r_.neon_f32 = vmulq_f32(a_.neon_f32, recip1); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_div(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) r_.altivec_f32 = vec_div(a_.altivec_f32, b_.altivec_f32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.f32 = a_.f32 / b_.f32; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = a_.f32[i] / b_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_div_ps(a, b) simde_mm_div_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_div_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_div_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_div_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32_t value = vgetq_lane_f32(simde__m128_to_private(simde_mm_div_ps(a, b)).neon_f32, 0); r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0); #else r_.f32[0] = a_.f32[0] / b_.f32[0]; SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = a_.f32[i]; } #endif return 
simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_div_ss(a, b) simde_mm_div_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int16_t simde_mm_extract_pi16 (simde__m64 a, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) { simde__m64_private a_ = simde__m64_to_private(a); return a_.i16[imm8]; } #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(HEDLEY_PGI_VERSION) # if defined(SIMDE_BUG_CLANG_44589) # define simde_mm_extract_pi16(a, imm8) ( \ HEDLEY_DIAGNOSTIC_PUSH \ _Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \ HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16((a), (imm8))) \ HEDLEY_DIAGNOSTIC_POP \ ) # else # define simde_mm_extract_pi16(a, imm8) HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16(a, imm8)) # endif #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) # define simde_mm_extract_pi16(a, imm8) vget_lane_s16(simde__m64_to_private(a).neon_i16, imm8) #endif #define simde_m_pextrw(a, imm8) simde_mm_extract_pi16(a, imm8) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_extract_pi16(a, imm8) simde_mm_extract_pi16((a), (imm8)) # define _m_pextrw(a, imm8) simde_mm_extract_pi16((a), (imm8)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_insert_pi16 (simde__m64 a, int16_t i, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) { simde__m64_private r_, a_ = simde__m64_to_private(a); r_.i64[0] = a_.i64[0]; r_.i16[imm8] = i; return simde__m64_from_private(r_); } #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI) # if defined(SIMDE_BUG_CLANG_44589) # define simde_mm_insert_pi16(a, i, imm8) ( \ HEDLEY_DIAGNOSTIC_PUSH \ _Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \ (_mm_insert_pi16((a), (i), (imm8))) \ HEDLEY_DIAGNOSTIC_POP \ ) # else # define simde_mm_insert_pi16(a, i, imm8) _mm_insert_pi16(a, i, imm8) # endif #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) # define simde_mm_insert_pi16(a, i, imm8) simde__m64_from_neon_i16(vset_lane_s16((i), simde__m64_to_neon_i16(a), (imm8))) #endif #define simde_m_pinsrw(a, i, imm8) (simde_mm_insert_pi16(a, i, imm8)) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_insert_pi16(a, i, imm8) simde_mm_insert_pi16(a, i, imm8) # define _m_pinsrw(a, i, imm8) simde_mm_insert_pi16(a, i, imm8) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_load_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_load_ps(mem_addr); #else simde__m128_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vld1q_f32(mem_addr); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) r_.altivec_f32 = vec_vsx_ld(0, mem_addr); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_ld(0, mem_addr); #else simde_memcpy(&r_, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128), sizeof(r_)); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_load_ps(mem_addr) simde_mm_load_ps(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_load1_ps (simde_float32 const* mem_addr) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_load_ps1(mem_addr); #else simde__m128_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vld1q_dup_f32(mem_addr); #else r_ = simde__m128_to_private(simde_mm_set1_ps(*mem_addr)); #endif return simde__m128_from_private(r_); #endif } #define simde_mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_load_ps1(mem_addr) 
simde_mm_load1_ps(mem_addr) # define _mm_load1_ps(mem_addr) simde_mm_load1_ps(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_load_ss (simde_float32 const* mem_addr) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_load_ss(mem_addr); #else simde__m128_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(*mem_addr, vdupq_n_f32(0), 0); #else r_.f32[0] = *mem_addr; r_.i32[1] = 0; r_.i32[2] = 0; r_.i32[3] = 0; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_load_ss(mem_addr) simde_mm_load_ss(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_loadh_pi (simde__m128 a, simde__m64 const* mem_addr) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_loadh_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcombine_f32(vget_low_f32(a_.neon_f32), vld1_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr))); #else simde__m64_private b_ = *HEDLEY_REINTERPRET_CAST(simde__m64_private const*, mem_addr); r_.f32[0] = a_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = b_.f32[0]; r_.f32[3] = b_.f32[1]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #if HEDLEY_HAS_WARNING("-Wold-style-cast") #define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), HEDLEY_REINTERPRET_CAST(simde__m64 const*, (mem_addr))) #else #define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), (simde__m64 const*) (mem_addr)) #endif #endif /* The SSE documentation says that there are no alignment requirements for mem_addr. Unfortunately they used the __m64 type for the argument which is supposed to be 8-byte aligned, so some compilers (like clang with -Wcast-align) will generate a warning if you try to cast, say, a simde_float32* to a simde__m64* for this function. I think the choice of argument type is unfortunate, but I do think we need to stick to it here. 
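   One way for callers to avoid the warning in the meantime is to copy the
   two floats into a properly aligned simde__m64 and pass its address; the
   names here are illustrative:

     simde__m64 tmp;
     memcpy(&tmp, src, sizeof(tmp));
     v = simde_mm_loadh_pi(v, &tmp);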
If there is demand I can always add something like simde_x_mm_loadl_f32(simde__m128, simde_float32 mem_addr[2]) */ SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_loadl_pi (simde__m128 a, simde__m64 const* mem_addr) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_loadl_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcombine_f32(vld1_f32( HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)), vget_high_f32(a_.neon_f32)); #else simde__m64_private b_; simde_memcpy(&b_, mem_addr, sizeof(b_)); r_.i32[0] = b_.i32[0]; r_.i32[1] = b_.i32[1]; r_.i32[2] = a_.i32[2]; r_.i32[3] = a_.i32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #if HEDLEY_HAS_WARNING("-Wold-style-cast") #define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), HEDLEY_REINTERPRET_CAST(simde__m64 const*, (mem_addr))) #else #define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), (simde__m64 const*) (mem_addr)) #endif #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_loadr_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_loadr_ps(mem_addr); #else simde__m128_private r_, v_ = simde__m128_to_private(simde_mm_load_ps(mem_addr)); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vrev64q_f32(v_.neon_f32); r_.neon_f32 = vextq_f32(r_.neon_f32, r_.neon_f32, 2); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__) r_.altivec_f32 = vec_reve(v_.altivec_f32); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, v_.f32, v_.f32, 3, 2, 1, 0); #else r_.f32[0] = v_.f32[3]; r_.f32[1] = v_.f32[2]; r_.f32[2] = v_.f32[1]; r_.f32[3] = v_.f32[0]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_loadr_ps(mem_addr) simde_mm_loadr_ps(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_loadu_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_loadu_ps(mem_addr); #else simde__m128_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vld1q_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_load(mem_addr); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__) r_.altivec_f32 = vec_vsx_ld(0, mem_addr); #else simde_memcpy(&r_, mem_addr, sizeof(r_)); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_loadu_ps(mem_addr) simde_mm_loadu_ps(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_maskmove_si64 (simde__m64 a, simde__m64 mask, int8_t* mem_addr) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) _mm_maskmove_si64(a, mask, HEDLEY_REINTERPRET_CAST(char*, mem_addr)); #else simde__m64_private a_ = simde__m64_to_private(a), mask_ = simde__m64_to_private(mask); SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) if (mask_.i8[i] < 0) mem_addr[i] = a_.i8[i]; #endif } #define simde_m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64(a, mask, mem_addr) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_maskmove_si64(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr))) # define _m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, 
char*, (mem_addr))) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_max_pi16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_max_pi16(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i16 = vmax_s16(a_.neon_i16, b_.neon_i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? a_.i16[i] : b_.i16[i]; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pmaxsw(a, b) simde_mm_max_pi16(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_max_pi16(a, b) simde_mm_max_pi16(a, b) # define _m_pmaxsw(a, b) simde_mm_max_pi16(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_max_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_max_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_NANS) r_.neon_f32 = vmaxq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vbslq_f32(vcgtq_f32(a_.neon_f32, b_.neon_f32), a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS) r_.wasm_v128 = wasm_f32x4_max(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128)); #elif (defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)) && defined(SIMDE_FAST_NANS) r_.altivec_f32 = vec_max(a_.altivec_f32, b_.altivec_f32); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(a_.altivec_f32, b_.altivec_f32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = (a_.f32[i] > b_.f32[i]) ? a_.f32[i] : b_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_max_ps(a, b) simde_mm_max_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_max_pu8 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_max_pu8(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u8 = vmax_u8(a_.neon_u8, b_.neon_u8); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? a_.u8[i] : b_.u8[i]; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pmaxub(a, b) simde_mm_max_pu8(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_max_pu8(a, b) simde_mm_max_pu8(a, b) # define _m_pmaxub(a, b) simde_mm_max_pu8(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_max_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_max_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_max_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32_t value = vgetq_lane_f32(vmaxq_f32(a_.neon_f32, b_.neon_f32), 0); r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0); #else r_.f32[0] = (a_.f32[0] > b_.f32[0]) ? 
a_.f32[0] : b_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_max_ss(a, b) simde_mm_max_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_min_pi16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_min_pi16(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i16 = vmin_s16(a_.neon_i16, b_.neon_i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { r_.i16[i] = (a_.i16[i] < b_.i16[i]) ? a_.i16[i] : b_.i16[i]; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pminsw(a, b) simde_mm_min_pi16(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_min_pi16(a, b) simde_mm_min_pi16(a, b) # define _m_pminsw(a, b) simde_mm_min_pi16(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_min_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_min_ps(a, b); #elif defined(SIMDE_FAST_NANS) && defined(SIMDE_ARM_NEON_A32V7_NATIVE) return simde__m128_from_neon_f32(vminq_f32(simde__m128_to_neon_f32(a), simde__m128_to_neon_f32(b))); #elif defined(SIMDE_WASM_SIMD128_NATIVE) simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_FAST_NANS) r_.wasm_v128 = wasm_f32x4_min(a_.wasm_v128, b_.wasm_v128); #else r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128)); #endif return simde__m128_from_private(r_); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_FAST_NANS) r_.altivec_f32 = vec_min(a_.altivec_f32, b_.altivec_f32); #else r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(b_.altivec_f32, a_.altivec_f32)); #endif return simde__m128_from_private(r_); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) simde__m128 mask = simde_mm_cmplt_ps(a, b); return simde_mm_or_ps(simde_mm_and_ps(mask, a), simde_mm_andnot_ps(mask, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = (a_.f32[i] < b_.f32[i]) ? a_.f32[i] : b_.f32[i]; } return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_min_ps(a, b) simde_mm_min_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_min_pu8 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_min_pu8(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u8 = vmin_u8(a_.neon_u8, b_.neon_u8); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { r_.u8[i] = (a_.u8[i] < b_.u8[i]) ? 
a_.u8[i] : b_.u8[i]; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pminub(a, b) simde_mm_min_pu8(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_min_pu8(a, b) simde_mm_min_pu8(a, b) # define _m_pminub(a, b) simde_mm_min_pu8(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_min_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_min_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_min_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32_t value = vgetq_lane_f32(vminq_f32(a_.neon_f32, b_.neon_f32), 0); r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0); #else r_.f32[0] = (a_.f32[0] < b_.f32[0]) ? a_.f32[0] : b_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_min_ss(a, b) simde_mm_min_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_movehl_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_movehl_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x2_t a32 = vget_high_f32(a_.neon_f32); float32x2_t b32 = vget_high_f32(b_.neon_f32); r_.neon_f32 = vcombine_f32(b32, a32); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_mergel(b_.altivec_i64, a_.altivec_i64)); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 6, 7, 2, 3); #else r_.f32[0] = b_.f32[2]; r_.f32[1] = b_.f32[3]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_movehl_ps(a, b) simde_mm_movehl_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_movelh_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_movelh_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x2_t a10 = vget_low_f32(a_.neon_f32); float32x2_t b10 = vget_low_f32(b_.neon_f32); r_.neon_f32 = vcombine_f32(a10, b10); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 1, 4, 5); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_mergeh(a_.altivec_i64, b_.altivec_i64)); #else r_.f32[0] = a_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = b_.f32[0]; r_.f32[3] = b_.f32[1]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_movelh_ps(a, b) simde_mm_movelh_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_movemask_pi8 (simde__m64 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_movemask_pi8(a); #else simde__m64_private a_ = simde__m64_to_private(a); int r = 0; #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) uint8x8_t input = a_.neon_u8; const int8_t xr[8] = {-7, -6, -5, -4, -3, -2, -1, 0}; const uint8x8_t mask_and = vdup_n_u8(0x80); const int8x8_t mask_shift = vld1_s8(xr); const uint8x8_t mask_result = 
vshl_u8(vand_u8(input, mask_and), mask_shift); uint8x8_t lo = mask_result; r = vaddv_u8(lo); #else const size_t nmemb = sizeof(a_.i8) / sizeof(a_.i8[0]); SIMDE_VECTORIZE_REDUCTION(|:r) for (size_t i = 0 ; i < nmemb ; i++) { r |= (a_.u8[nmemb - 1 - i] >> 7) << (nmemb - 1 - i); } #endif return r; #endif } #define simde_m_pmovmskb(a) simde_mm_movemask_pi8(a) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_movemask_pi8(a) simde_mm_movemask_pi8(a) # define _m_pmovmskb(a) simde_mm_movemask_pi8(a) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_movemask_ps (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_movemask_ps(a); #else int r = 0; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) static const int32_t shift_amount[] = { 0, 1, 2, 3 }; const int32x4_t shift = vld1q_s32(shift_amount); uint32x4_t tmp = vshrq_n_u32(a_.neon_u32, 31); return HEDLEY_STATIC_CAST(int, vaddvq_u32(vshlq_u32(tmp, shift))); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) // Shift out everything but the sign bits with a 32-bit unsigned shift right. uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(a_.neon_u32, 31)); // Merge the two pairs together with a 64-bit unsigned shift right + add. uint8x16_t paired = vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31)); // Extract the result. return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2); #else SIMDE_VECTORIZE_REDUCTION(|:r) for (size_t i = 0 ; i < sizeof(a_.u32) / sizeof(a_.u32[0]) ; i++) { r |= (a_.u32[i] >> ((sizeof(a_.u32[i]) * CHAR_BIT) - 1)) << i; } #endif return r; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_movemask_ps(a) simde_mm_movemask_ps((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_mul_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_mul_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vmulq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_mul(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.f32 = a_.f32 * b_.f32; #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) r_.altivec_f32 = vec_mul(a_.altivec_f32, b_.altivec_f32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = a_.f32[i] * b_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_mul_ps(a, b) simde_mm_mul_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_mul_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_mul_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_mul_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.f32[0] = a_.f32[0] * b_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_mul_ss(a, b) simde_mm_mul_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_mulhi_pu16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_mulhi_pu16(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) 
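/* Widening multiply to 32-bit products, shift right by 16 to keep only the high halves, then narrow back down to four 16-bit lanes. */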
const uint32x4_t t1 = vmull_u16(a_.neon_u16, b_.neon_u16); const uint32x4_t t2 = vshrq_n_u32(t1, 16); const uint16x4_t t3 = vmovn_u32(t2); r_.neon_u16 = t3; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, ((HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) * HEDLEY_STATIC_CAST(uint32_t, b_.u16[i])) >> UINT32_C(16))); } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_mulhi_pu16(a, b) simde_mm_mulhi_pu16(a, b) # define _m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b) #endif #if defined(SIMDE_X86_SSE_NATIVE) && defined(HEDLEY_GCC_VERSION) #define SIMDE_MM_HINT_NTA HEDLEY_STATIC_CAST(enum _mm_hint, 0) #define SIMDE_MM_HINT_T0 HEDLEY_STATIC_CAST(enum _mm_hint, 1) #define SIMDE_MM_HINT_T1 HEDLEY_STATIC_CAST(enum _mm_hint, 2) #define SIMDE_MM_HINT_T2 HEDLEY_STATIC_CAST(enum _mm_hint, 3) #define SIMDE_MM_HINT_ENTA HEDLEY_STATIC_CAST(enum _mm_hint, 4) #define SIMDE_MM_HINT_ET0 HEDLEY_STATIC_CAST(enum _mm_hint, 5) #define SIMDE_MM_HINT_ET1 HEDLEY_STATIC_CAST(enum _mm_hint, 6) #define SIMDE_MM_HINT_ET2 HEDLEY_STATIC_CAST(enum _mm_hint, 7) #else #define SIMDE_MM_HINT_NTA 0 #define SIMDE_MM_HINT_T0 1 #define SIMDE_MM_HINT_T1 2 #define SIMDE_MM_HINT_T2 3 #define SIMDE_MM_HINT_ENTA 4 #define SIMDE_MM_HINT_ET0 5 #define SIMDE_MM_HINT_ET1 6 #define SIMDE_MM_HINT_ET2 7 #endif #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) HEDLEY_DIAGNOSTIC_PUSH #if HEDLEY_HAS_WARNING("-Wreserved-id-macro") _Pragma("clang diagnostic ignored \"-Wreserved-id-macro\"") #endif #undef _MM_HINT_NTA #define _MM_HINT_NTA SIMDE_MM_HINT_NTA #undef _MM_HINT_T0 #define _MM_HINT_T0 SIMDE_MM_HINT_T0 #undef _MM_HINT_T1 #define _MM_HINT_T1 SIMDE_MM_HINT_T1 #undef _MM_HINT_T2 #define _MM_HINT_T2 SIMDE_MM_HINT_T2 #undef _MM_HINT_ENTA #define _MM_HINT_ENTA SIMDE_MM_HINT_ENTA #undef _MM_HINT_ET0 #define _MM_HINT_ET0 SIMDE_MM_HINT_ET0 #undef _MM_HINT_ET1 #define _MM_HINT_ET1 SIMDE_MM_HINT_ET1 #undef _MM_HINT_ET2 #define _MM_HINT_ET2 SIMDE_MM_HINT_ET2 HEDLEY_DIAGNOSTIC_POP #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_prefetch (char const* p, int i) { #if defined(HEDLEY_GCC_VERSION) __builtin_prefetch(p); #else (void) p; #endif (void) i; } #if defined(SIMDE_X86_SSE_NATIVE) #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) /* https://reviews.llvm.org/D71718 */ #define simde_mm_prefetch(p, i) \ (__extension__({ \ HEDLEY_DIAGNOSTIC_PUSH \ HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL \ _mm_prefetch((p), (i)); \ HEDLEY_DIAGNOSTIC_POP \ })) #else #define simde_mm_prefetch(p, i) _mm_prefetch(p, i) #endif #endif #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_prefetch(p, i) simde_mm_prefetch(p, i) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_negate_ps(simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return simde_mm_xor_ps(a, _mm_set1_ps(SIMDE_FLOAT32_C(-0.0))); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \ (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0)) r_.altivec_f32 = vec_neg(a_.altivec_f32); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vnegq_f32(a_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_neg(a_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) r_.altivec_f32 = vec_neg(a_.altivec_f32); #elif defined(SIMDE_VECTOR_NEGATE) r_.f32 = -a_.f32; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < 
(sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = -a_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_rcp_ps (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_rcp_ps(a); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x4_t recip = vrecpeq_f32(a_.neon_f32); #if SIMDE_ACCURACY_PREFERENCE > 0 for (int i = 0; i < SIMDE_ACCURACY_PREFERENCE ; ++i) { recip = vmulq_f32(recip, vrecpsq_f32(recip, a_.neon_f32)); } #endif r_.neon_f32 = recip; #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_div(simde_mm_set1_ps(1.0f), a_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_re(a_.altivec_f32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.f32 = 1.0f / a_.f32; #elif defined(SIMDE_IEEE754_STORAGE) /* https://stackoverflow.com/questions/12227126/division-as-multiply-and-lut-fast-float-division-reciprocal/12228234#12228234 */ SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { int32_t ix; simde_float32 fx = a_.f32[i]; simde_memcpy(&ix, &fx, sizeof(ix)); int32_t x = INT32_C(0x7EF311C3) - ix; simde_float32 temp; simde_memcpy(&temp, &x, sizeof(temp)); r_.f32[i] = temp * (SIMDE_FLOAT32_C(2.0) - temp * fx); } #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = 1.0f / a_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_rcp_ps(a) simde_mm_rcp_ps((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_rcp_ss (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_rcp_ss(a); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_rcp_ps(a)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); r_.f32[0] = 1.0f / a_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_rcp_ss(a) simde_mm_rcp_ss((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_rsqrt_ps (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_rsqrt_ps(a); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vrsqrteq_f32(a_.neon_f32); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_rsqrte(a_.altivec_f32); #elif defined(SIMDE_IEEE754_STORAGE) /* https://basesandframes.files.wordpress.com/2020/04/even_faster_math_functions_green_2020.pdf Pages 100 - 103 */ SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { #if SIMDE_ACCURACY_PREFERENCE <= 0 r_.i32[i] = INT32_C(0x5F37624F) - (a_.i32[i] >> 1); #else simde_float32 x = a_.f32[i]; simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x; int32_t ix; simde_memcpy(&ix, &x, sizeof(ix)); #if SIMDE_ACCURACY_PREFERENCE == 1 ix = INT32_C(0x5F375A82) - (ix >> 1); #else ix = INT32_C(0x5F37599E) - (ix >> 1); #endif simde_memcpy(&x, &ix, sizeof(x)); #if SIMDE_ACCURACY_PREFERENCE >= 2 x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x); #endif x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x); r_.f32[i] = x; #endif } #elif defined(simde_math_sqrtf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = 1.0f / simde_math_sqrtf(a_.f32[i]); } #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if 
defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_rsqrt_ps(a) simde_mm_rsqrt_ps((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_rsqrt_ss (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_rsqrt_ss(a); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_rsqrt_ps(a)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(simde_mm_rsqrt_ps(a).neon_f32, 0), a_.neon_f32, 0); #elif defined(SIMDE_IEEE754_STORAGE) { #if SIMDE_ACCURACY_PREFERENCE <= 0 r_.i32[0] = INT32_C(0x5F37624F) - (a_.i32[0] >> 1); #else simde_float32 x = a_.f32[0]; simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x; int32_t ix; simde_memcpy(&ix, &x, sizeof(ix)); #if SIMDE_ACCURACY_PREFERENCE == 1 ix = INT32_C(0x5F375A82) - (ix >> 1); #else ix = INT32_C(0x5F37599E) - (ix >> 1); #endif simde_memcpy(&x, &ix, sizeof(x)); #if SIMDE_ACCURACY_PREFERENCE >= 2 x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x); #endif x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x); r_.f32[0] = x; #endif } r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #elif defined(simde_math_sqrtf) r_.f32[0] = 1.0f / simde_math_sqrtf(a_.f32[0]); r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_rsqrt_ss(a) simde_mm_rsqrt_ss((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_sad_pu8 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_sad_pu8(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint16x4_t t = vpaddl_u8(vabd_u8(a_.neon_u8, b_.neon_u8)); uint16_t r0 = t[0] + t[1] + t[2] + t[3]; r_.neon_u16 = vset_lane_u16(r0, vdup_n_u16(0), 0); #else uint16_t sum = 0; #if defined(SIMDE_HAVE_STDLIB_H) SIMDE_VECTORIZE_REDUCTION(+:sum) for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { sum += HEDLEY_STATIC_CAST(uint8_t, abs(a_.u8[i] - b_.u8[i])); } r_.i16[0] = HEDLEY_STATIC_CAST(int16_t, sum); r_.i16[1] = 0; r_.i16[2] = 0; r_.i16[3] = 0; #else HEDLEY_UNREACHABLE(); #endif #endif return simde__m64_from_private(r_); #endif } #define simde_m_psadbw(a, b) simde_mm_sad_pu8(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_sad_pu8(a, b) simde_mm_sad_pu8(a, b) # define _m_psadbw(a, b) simde_mm_sad_pu8(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_set_ss (simde_float32 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_set_ss(a); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vsetq_lane_f32(a, vdupq_n_f32(SIMDE_FLOAT32_C(0.0)), 0); #else return simde_mm_set_ps(SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), a); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_set_ss(a) simde_mm_set_ss(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_setr_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_setr_ps(e3, e2, e1, e0); #else return simde_mm_set_ps(e0, e1, e2, e3); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_setr_ps(e3, e2, e1, e0) simde_mm_setr_ps(e3, e2, e1, e0) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_setzero_ps (void) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_setzero_ps(); #elif 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vdupq_n_f32(SIMDE_FLOAT32_C(0.0)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_splats(SIMDE_FLOAT32_C(0.0)); #else simde__m128 r; simde_memset(&r, 0, sizeof(r)); return r; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_setzero_ps() simde_mm_setzero_ps() #endif #if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_) HEDLEY_DIAGNOSTIC_PUSH SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_undefined_ps (void) { simde__m128_private r_; #if defined(SIMDE_HAVE_UNDEFINED128) r_.n = _mm_undefined_ps(); #elif !defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_) r_ = simde__m128_to_private(simde_mm_setzero_ps()); #endif return simde__m128_from_private(r_); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_undefined_ps() simde_mm_undefined_ps() #endif #if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_) HEDLEY_DIAGNOSTIC_POP #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_setone_ps (void) { simde__m128 t = simde_mm_setzero_ps(); return simde_mm_cmpeq_ps(t, t); } SIMDE_FUNCTION_ATTRIBUTES void simde_mm_sfence (void) { /* TODO: Use Hedley. */ #if defined(SIMDE_X86_SSE_NATIVE) _mm_sfence(); #elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)) __atomic_thread_fence(__ATOMIC_SEQ_CST); #elif !defined(__INTEL_COMPILER) && defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) #if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 9) __atomic_thread_fence(__ATOMIC_SEQ_CST); #else atomic_thread_fence(memory_order_seq_cst); #endif #elif defined(_MSC_VER) MemoryBarrier(); #elif HEDLEY_HAS_EXTENSION(c_atomic) __c11_atomic_thread_fence(__ATOMIC_SEQ_CST); #elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) __sync_synchronize(); #elif defined(_OPENMP) #pragma omp critical(simde_mm_sfence_) { } #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_sfence() simde_mm_sfence() #endif #define SIMDE_MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w)) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _MM_SHUFFLE(z, y, x, w) SIMDE_MM_SHUFFLE(z, y, x, w) #endif #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI) # define simde_mm_shuffle_pi16(a, imm8) _mm_shuffle_pi16(a, imm8) #elif defined(SIMDE_SHUFFLE_VECTOR_) # define simde_mm_shuffle_pi16(a, imm8) (__extension__ ({ \ const simde__m64_private simde__tmp_a_ = simde__m64_to_private(a); \ simde__m64_from_private((simde__m64_private) { .i16 = \ SIMDE_SHUFFLE_VECTOR_(16, 8, \ (simde__tmp_a_).i16, \ (simde__tmp_a_).i16, \ (((imm8) ) & 3), \ (((imm8) >> 2) & 3), \ (((imm8) >> 4) & 3), \ (((imm8) >> 6) & 3)) }); })) #else SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_shuffle_pi16 (simde__m64 a, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m64_private r_; simde__m64_private a_ = simde__m64_to_private(a); for (size_t i = 0 ; i < sizeof(r_.i16) / sizeof(r_.i16[0]) ; i++) { r_.i16[i] = a_.i16[(imm8 >> (i * 2)) & 3]; } HEDLEY_DIAGNOSTIC_PUSH #if HEDLEY_HAS_WARNING("-Wconditional-uninitialized") # pragma clang diagnostic ignored "-Wconditional-uninitialized" #endif return simde__m64_from_private(r_); HEDLEY_DIAGNOSTIC_POP } #endif #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI) # define simde_m_pshufw(a, imm8) _m_pshufw(a, imm8) #else # define simde_m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8) #endif 
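/* Lane i of the result takes lane ((imm8 >> (2 * i)) & 3) of a; for example, simde_mm_shuffle_pi16(a, SIMDE_MM_SHUFFLE(0, 1, 2, 3)) reverses the four 16-bit lanes. */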
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_shuffle_pi16(a, imm8) simde_mm_shuffle_pi16(a, imm8) # define _m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8) #endif #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI) # define simde_mm_shuffle_ps(a, b, imm8) _mm_shuffle_ps(a, b, imm8) #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_mm_shuffle_ps(a, b, imm8) \ __extension__({ \ float32x4_t ret; \ ret = vmovq_n_f32( \ vgetq_lane_f32(a, (imm8) & (0x3))); \ ret = vsetq_lane_f32( \ vgetq_lane_f32(a, ((imm8) >> 2) & 0x3), \ ret, 1); \ ret = vsetq_lane_f32( \ vgetq_lane_f32(b, ((imm8) >> 4) & 0x3), \ ret, 2); \ ret = vsetq_lane_f32( \ vgetq_lane_f32(b, ((imm8) >> 6) & 0x3), \ ret, 3); \ }) #elif defined(SIMDE_SHUFFLE_VECTOR_) # define simde_mm_shuffle_ps(a, b, imm8) (__extension__ ({ \ simde__m128_from_private((simde__m128_private) { .f32 = \ SIMDE_SHUFFLE_VECTOR_(32, 16, \ simde__m128_to_private(a).f32, \ simde__m128_to_private(b).f32, \ (((imm8) ) & 3), \ (((imm8) >> 2) & 3), \ (((imm8) >> 4) & 3) + 4, \ (((imm8) >> 6) & 3) + 4) }); })) #else SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_shuffle_ps (simde__m128 a, simde__m128 b, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.f32[0] = a_.f32[(imm8 >> 0) & 3]; r_.f32[1] = a_.f32[(imm8 >> 2) & 3]; r_.f32[2] = b_.f32[(imm8 >> 4) & 3]; r_.f32[3] = b_.f32[(imm8 >> 6) & 3]; return simde__m128_from_private(r_); } #endif #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_shuffle_ps(a, b, imm8) simde_mm_shuffle_ps((a), (b), imm8) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_sqrt_ps (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_sqrt_ps(a); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f32 = vsqrtq_f32(a_.neon_f32); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x4_t est = vrsqrteq_f32(a_.neon_f32); for (int i = 0 ; i <= SIMDE_ACCURACY_PREFERENCE ; i++) { est = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a_.neon_f32, est), est), est); } r_.neon_f32 = vmulq_f32(a_.neon_f32, est); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_sqrt(a_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) r_.altivec_f32 = vec_sqrt(a_.altivec_f32); #elif defined(simde_math_sqrt) SIMDE_VECTORIZE for (size_t i = 0 ; i < sizeof(r_.f32) / sizeof(r_.f32[0]) ; i++) { r_.f32[i] = simde_math_sqrtf(a_.f32[i]); } #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_sqrt_ps(a) simde_mm_sqrt_ps((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_sqrt_ss (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_sqrt_ss(a); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_sqrt_ps(a)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32_t value = vgetq_lane_f32(simde__m128_to_private(simde_mm_sqrt_ps(a)).neon_f32, 0); r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0); #elif defined(simde_math_sqrtf) r_.f32[0] = simde_math_sqrtf(a_.f32[0]); r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_sqrt_ss(a) simde_mm_sqrt_ss((a)) #endif SIMDE_FUNCTION_ATTRIBUTES void 
simde_mm_store_ps (simde_float32 mem_addr[4], simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_store_ps(mem_addr, a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_f32(mem_addr, a_.neon_f32); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) vec_st(a_.altivec_f32, 0, mem_addr); #elif defined(SIMDE_WASM_SIMD128_NATIVE) wasm_v128_store(mem_addr, a_.wasm_v128); #else simde_memcpy(mem_addr, &a_, sizeof(a)); #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_store_ps(mem_addr, a) simde_mm_store_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_store1_ps (simde_float32 mem_addr[4], simde__m128 a) { simde_float32* mem_addr_ = SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128); #if defined(SIMDE_X86_SSE_NATIVE) _mm_store_ps1(mem_addr_, a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_f32(mem_addr_, vdupq_lane_f32(vget_low_f32(a_.neon_f32), 0)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) wasm_v128_store(mem_addr_, wasm_v32x4_shuffle(a_.wasm_v128, a_.wasm_v128, 0, 0, 0, 0)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) vec_st(vec_splat(a_.altivec_f32, 0), 0, mem_addr_); #elif defined(SIMDE_SHUFFLE_VECTOR_) simde__m128_private tmp_; tmp_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 0, 0, 0, 0); simde_mm_store_ps(mem_addr_, tmp_.f32); #else SIMDE_VECTORIZE_ALIGNED(mem_addr_:16) for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) { mem_addr_[i] = a_.f32[0]; } #endif #endif } #define simde_mm_store_ps1(mem_addr, a) simde_mm_store1_ps(mem_addr, a) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_store_ps1(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) # define _mm_store1_ps(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_store_ss (simde_float32* mem_addr, simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_store_ss(mem_addr, a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_lane_f32(mem_addr, a_.neon_f32, 0); #else *mem_addr = a_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_store_ss(mem_addr, a) simde_mm_store_ss(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_storeh_pi (simde__m64* mem_addr, simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_storeh_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1_f32(HEDLEY_REINTERPRET_CAST(float32_t*, mem_addr), vget_high_f32(a_.neon_f32)); #else simde_memcpy(mem_addr, &(a_.m64[1]), sizeof(a_.m64[1])); #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_storeh_pi(mem_addr, a) simde_mm_storeh_pi(mem_addr, (a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_storel_pi (simde__m64* mem_addr, simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_storel_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a); #else simde__m64_private* dest_ = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr); simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) dest_->neon_f32 = vget_low_f32(a_.neon_f32); #else dest_->f32[0] = a_.f32[0]; dest_->f32[1] = 
a_.f32[1]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_storel_pi(mem_addr, a) simde_mm_storel_pi(mem_addr, (a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_storer_ps (simde_float32 mem_addr[4], simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_storer_ps(mem_addr, a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) vec_st(vec_reve(a_.altivec_f32), 0, mem_addr); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x4_t tmp = vrev64q_f32(a_.neon_f32); vst1q_f32(mem_addr, vextq_f32(tmp, tmp, 2)); #elif defined(SIMDE_SHUFFLE_VECTOR_) a_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 3, 2, 1, 0); simde_mm_store_ps(mem_addr, simde__m128_from_private(a_)); #else SIMDE_VECTORIZE_ALIGNED(mem_addr:16) for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) { mem_addr[i] = a_.f32[((sizeof(a_.f32) / sizeof(a_.f32[0])) - 1) - i]; } #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_storer_ps(mem_addr, a) simde_mm_storer_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_storeu_ps (simde_float32 mem_addr[4], simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_storeu_ps(mem_addr, a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_f32(mem_addr, a_.neon_f32); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) vec_vsx_st(a_.altivec_f32, 0, mem_addr); #else simde_memcpy(mem_addr, &a_, sizeof(a_)); #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_storeu_ps(mem_addr, a) simde_mm_storeu_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_sub_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_sub_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsubq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_sub(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_sub(a_.altivec_f32, b_.altivec_f32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.f32 = a_.f32 - b_.f32; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = a_.f32[i] - b_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_sub_ps(a, b) simde_mm_sub_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_sub_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_sub_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_sub_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.f32[0] = a_.f32[0] - b_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_sub_ss(a, b) simde_mm_sub_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_ucomieq_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_ucomieq_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); int r; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = 
vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan)); uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32); r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0); #elif defined(SIMDE_HAVE_FENV_H) fenv_t envp; int x = feholdexcept(&envp); r = a_.f32[0] == b_.f32[0]; if (HEDLEY_LIKELY(x == 0)) fesetenv(&envp); #else r = a_.f32[0] == b_.f32[0]; #endif return r; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_ucomieq_ss(a, b) simde_mm_ucomieq_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_ucomige_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_ucomige_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); int r; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan); uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32); r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0); #elif defined(SIMDE_HAVE_FENV_H) fenv_t envp; int x = feholdexcept(&envp); r = a_.f32[0] >= b_.f32[0]; if (HEDLEY_LIKELY(x == 0)) fesetenv(&envp); #else r = a_.f32[0] >= b_.f32[0]; #endif return r; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_ucomige_ss(a, b) simde_mm_ucomige_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_ucomigt_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_ucomigt_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); int r; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan); uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32); r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0); #elif defined(SIMDE_HAVE_FENV_H) fenv_t envp; int x = feholdexcept(&envp); r = a_.f32[0] > b_.f32[0]; if (HEDLEY_LIKELY(x == 0)) fesetenv(&envp); #else r = a_.f32[0] > b_.f32[0]; #endif return r; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_ucomigt_ss(a, b) simde_mm_ucomigt_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_ucomile_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_ucomile_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); int r; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan)); uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32); r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0); #elif defined(SIMDE_HAVE_FENV_H) fenv_t envp; int x = feholdexcept(&envp); r = a_.f32[0] <= b_.f32[0]; if (HEDLEY_LIKELY(x == 0)) fesetenv(&envp); #else r = a_.f32[0] <= b_.f32[0]; #endif return r; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_ucomile_ss(a, b) simde_mm_ucomile_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_ucomilt_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_ucomilt_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), 
b_ = simde__m128_to_private(b); int r; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan)); uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32); r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0); #elif defined(SIMDE_HAVE_FENV_H) fenv_t envp; int x = feholdexcept(&envp); r = a_.f32[0] < b_.f32[0]; if (HEDLEY_LIKELY(x == 0)) fesetenv(&envp); #else r = a_.f32[0] < b_.f32[0]; #endif return r; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_ucomilt_ss(a, b) simde_mm_ucomilt_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_ucomineq_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_ucomineq_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); int r; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan); uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32)); r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0); #elif defined(SIMDE_HAVE_FENV_H) fenv_t envp; int x = feholdexcept(&envp); r = a_.f32[0] != b_.f32[0]; if (HEDLEY_LIKELY(x == 0)) fesetenv(&envp); #else r = a_.f32[0] != b_.f32[0]; #endif return r; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_ucomineq_ss(a, b) simde_mm_ucomineq_ss((a), (b)) #endif #if defined(SIMDE_X86_SSE_NATIVE) # if defined(__has_builtin) # if __has_builtin(__builtin_ia32_undef128) # define SIMDE_HAVE_UNDEFINED128 # endif # elif !defined(__PGI) && !defined(SIMDE_BUG_GCC_REV_208793) && !defined(_MSC_VER) # define SIMDE_HAVE_UNDEFINED128 # endif #endif #if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_) HEDLEY_DIAGNOSTIC_PUSH SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_unpackhi_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_unpackhi_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f32 = vzip2q_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x2_t a1 = vget_high_f32(a_.neon_f32); float32x2_t b1 = vget_high_f32(b_.neon_f32); float32x2x2_t result = vzip_f32(a1, b1); r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 2, 6, 3, 7); #else r_.f32[0] = a_.f32[2]; r_.f32[1] = b_.f32[2]; r_.f32[2] = a_.f32[3]; r_.f32[3] = b_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_unpackhi_ps(a, b) simde_mm_unpackhi_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_unpacklo_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_unpacklo_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f32 = vzip1q_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_mergeh(a_.altivec_f32, b_.altivec_f32); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 4, 1, 5); #elif 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x2_t a1 = vget_low_f32(a_.neon_f32); float32x2_t b1 = vget_low_f32(b_.neon_f32); float32x2x2_t result = vzip_f32(a1, b1); r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]); #else r_.f32[0] = a_.f32[0]; r_.f32[1] = b_.f32[0]; r_.f32[2] = a_.f32[1]; r_.f32[3] = b_.f32[1]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_unpacklo_ps(a, b) simde_mm_unpacklo_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_stream_pi (simde__m64* mem_addr, simde__m64 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) _mm_stream_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a); #else simde__m64_private* dest = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr), a_ = simde__m64_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) dest->i64[0] = vget_lane_s64(a_.neon_i64, 0); #else dest->i64[0] = a_.i64[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_stream_pi(mem_addr, a) simde_mm_stream_pi(mem_addr, (a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_stream_ps (simde_float32 mem_addr[4], simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_stream_ps(mem_addr, a); #elif HEDLEY_HAS_BUILTIN(__builtin_nontemporal_store) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) simde__m128_private a_ = simde__m128_to_private(a); __builtin_nontemporal_store(a_.f32, SIMDE_ALIGN_CAST(__typeof__(a_.f32)*, mem_addr)); #else simde_mm_store_ps(mem_addr, a); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_stream_ps(mem_addr, a) simde_mm_stream_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) #endif #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \ do { \ float32x4x2_t ROW01 = vtrnq_f32(row0, row1); \ float32x4x2_t ROW23 = vtrnq_f32(row2, row3); \ row0 = vcombine_f32(vget_low_f32(ROW01.val[0]), \ vget_low_f32(ROW23.val[0])); \ row1 = vcombine_f32(vget_low_f32(ROW01.val[1]), \ vget_low_f32(ROW23.val[1])); \ row2 = vcombine_f32(vget_high_f32(ROW01.val[0]), \ vget_high_f32(ROW23.val[0])); \ row3 = vcombine_f32(vget_high_f32(ROW01.val[1]), \ vget_high_f32(ROW23.val[1])); \ } while (0) #else #define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \ do { \ simde__m128 tmp3, tmp2, tmp1, tmp0; \ tmp0 = simde_mm_unpacklo_ps((row0), (row1)); \ tmp2 = simde_mm_unpacklo_ps((row2), (row3)); \ tmp1 = simde_mm_unpackhi_ps((row0), (row1)); \ tmp3 = simde_mm_unpackhi_ps((row2), (row3)); \ row0 = simde_mm_movelh_ps(tmp0, tmp2); \ row1 = simde_mm_movehl_ps(tmp2, tmp0); \ row2 = simde_mm_movelh_ps(tmp1, tmp3); \ row3 = simde_mm_movehl_ps(tmp3, tmp1); \ } while (0) #endif #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) #endif SIMDE_END_DECLS_ HEDLEY_DIAGNOSTIC_POP #endif /* !defined(SIMDE_X86_SSE_H) */
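/* Usage sketch (illustrative, not part of the original header): the same
 * source compiles against native SSE, NEON, AltiVec/VSX, WASM SIMD, or the
 * portable scalar fallbacks selected above.  Assumes simde_mm_loadu_ps,
 * declared earlier in this header, and the upstream "simde/x86/sse.h"
 * include path.
 *
 *   #include "simde/x86/sse.h"
 *
 *   static void transpose4x4(float m[4][4]) {
 *     simde__m128 r0 = simde_mm_loadu_ps(m[0]);
 *     simde__m128 r1 = simde_mm_loadu_ps(m[1]);
 *     simde__m128 r2 = simde_mm_loadu_ps(m[2]);
 *     simde__m128 r3 = simde_mm_loadu_ps(m[3]);
 *     SIMDE_MM_TRANSPOSE4_PS(r0, r1, r2, r3);   // in-place 4x4 transpose, as defined above
 *     simde_mm_storeu_ps(m[0], r0);
 *     simde_mm_storeu_ps(m[1], r1);
 *     simde_mm_storeu_ps(m[2], r2);
 *     simde_mm_storeu_ps(m[3], r3);
 *   }
 */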
par_csr_matop.c
/*BHEADER********************************************************************** * Copyright (c) 2017, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322. * This file is part of AMG. See files README and COPYRIGHT for details. * * AMG is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * This software is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the * GNU General Public License for more details. * ***********************************************************************EHEADER*/ #include "_hypre_parcsr_mv.h" #include "_hypre_utilities.h" #include "hypre_hopscotch_hash.h" #include "_hypre_parcsr_mv.h" /* RDF: The following prototype already exists in _hypre_parcsr_ls.h, so * something needs to be reorganized here.*/ #ifdef __cplusplus extern "C" { #endif hypre_CSRMatrix * hypre_ExchangeRAPData( hypre_CSRMatrix *RAP_int, hypre_ParCSRCommPkg *comm_pkg_RT); /* reference seems necessary to prevent a problem with the "headers" script... */ #ifdef __cplusplus } #endif /* The following function was formerly part of hypre_ParMatmul but was removed so it can also be used for multiplication of Boolean matrices */ void hypre_ParMatmul_RowSizes( HYPRE_Int ** C_diag_i, HYPRE_Int ** C_offd_i, /*HYPRE_Int ** B_marker,*/ HYPRE_Int * A_diag_i, HYPRE_Int * A_diag_j, HYPRE_Int * A_offd_i, HYPRE_Int * A_offd_j, HYPRE_Int * B_diag_i, HYPRE_Int * B_diag_j, HYPRE_Int * B_offd_i, HYPRE_Int * B_offd_j, HYPRE_Int * B_ext_diag_i, HYPRE_Int * B_ext_diag_j, HYPRE_Int * B_ext_offd_i, HYPRE_Int * B_ext_offd_j, HYPRE_Int * map_B_to_C, HYPRE_Int *C_diag_size, HYPRE_Int *C_offd_size, HYPRE_Int num_rows_diag_A, HYPRE_Int num_cols_offd_A, HYPRE_Int allsquare, HYPRE_Int num_cols_diag_B, HYPRE_Int num_cols_offd_B, HYPRE_Int num_cols_offd_C ) { HYPRE_Int i1, i2, i3, jj2, jj3; HYPRE_Int jj_count_diag, jj_count_offd, jj_row_begin_diag, jj_row_begin_offd; HYPRE_Int start_indexing = 0; /* start indexing for C_data at 0 */ HYPRE_Int num_threads = hypre_NumThreads(); HYPRE_Int *jj_count_diag_array; HYPRE_Int *jj_count_offd_array; HYPRE_Int ii, size, rest; /* First pass begins here. Computes sizes of C rows. 
Arrays computed: C_diag_i, C_offd_i, B_marker Arrays needed: (11, all HYPRE_Int*) A_diag_i, A_diag_j, A_offd_i, A_offd_j, B_diag_i, B_diag_j, B_offd_i, B_offd_j, B_ext_i, B_ext_j, col_map_offd_B, col_map_offd_B, B_offd_i, B_offd_j, B_ext_i, B_ext_j, Scalars computed: C_diag_size, C_offd_size Scalars needed: num_rows_diag_A, num_rows_diag_A, num_cols_offd_A, allsquare, first_col_diag_B, n_cols_B, num_cols_offd_B, num_cols_diag_B */ *C_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A+1); *C_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A+1); jj_count_diag_array = hypre_CTAlloc(HYPRE_Int, num_threads); jj_count_offd_array = hypre_CTAlloc(HYPRE_Int, num_threads); /*----------------------------------------------------------------------- * Loop over rows of A *-----------------------------------------------------------------------*/ size = num_rows_diag_A/num_threads; rest = num_rows_diag_A - size*num_threads; #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(ii, i1, jj_row_begin_diag, jj_row_begin_offd, jj_count_diag, jj_count_offd, jj2, i2, jj3, i3) #endif /*for (ii=0; ii < num_threads; ii++)*/ { HYPRE_Int *B_marker = NULL; HYPRE_Int ns, ne; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } jj_count_diag = start_indexing; jj_count_offd = start_indexing; if (num_cols_diag_B || num_cols_offd_C) B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B+num_cols_offd_C); for (i1 = 0; i1 < num_cols_diag_B+num_cols_offd_C; i1++) B_marker[i1] = -1; for (i1 = ns; i1 < ne; i1++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). *--------------------------------------------------------------------*/ jj_row_begin_diag = jj_count_diag; jj_row_begin_offd = jj_count_offd; if ( allsquare ) { B_marker[i1] = jj_count_diag; jj_count_diag++; } /*----------------------------------------------------------------- * Loop over entries in row i1 of A_offd. *-----------------------------------------------------------------*/ if (num_cols_offd_A) { for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++) { i2 = A_offd_j[jj2]; /*----------------------------------------------------------- * Loop over entries in row i2 of B_ext. *-----------------------------------------------------------*/ for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2+1]; jj3++) { i3 = num_cols_diag_B+B_ext_offd_j[jj3]; /*-------------------------------------------------------- * Check B_marker to see that C_{i1,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_offd) { B_marker[i3] = jj_count_offd; jj_count_offd++; } } for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2+1]; jj3++) { i3 = B_ext_diag_j[jj3]; if (B_marker[i3] < jj_row_begin_diag) { B_marker[i3] = jj_count_diag; jj_count_diag++; } } } } /*----------------------------------------------------------------- * Loop over entries in row i1 of A_diag. *-----------------------------------------------------------------*/ for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++) { i2 = A_diag_j[jj2]; /*----------------------------------------------------------- * Loop over entries in row i2 of B_diag. 
*-----------------------------------------------------------*/ for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2+1]; jj3++) { i3 = B_diag_j[jj3]; /*-------------------------------------------------------- * Check B_marker to see that C_{i1,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_diag) { B_marker[i3] = jj_count_diag; jj_count_diag++; } } /*----------------------------------------------------------- * Loop over entries in row i2 of B_offd. *-----------------------------------------------------------*/ if (num_cols_offd_B) { for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2+1]; jj3++) { i3 = num_cols_diag_B+map_B_to_C[B_offd_j[jj3]]; /*-------------------------------------------------------- * Check B_marker to see that C_{i1,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_offd) { B_marker[i3] = jj_count_offd; jj_count_offd++; } } } } /*-------------------------------------------------------------------- * Set C_diag_i and C_offd_i for this row. *--------------------------------------------------------------------*/ (*C_diag_i)[i1] = jj_row_begin_diag; (*C_offd_i)[i1] = jj_row_begin_offd; } jj_count_diag_array[ii] = jj_count_diag; jj_count_offd_array[ii] = jj_count_offd; hypre_TFree(B_marker); #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii) { jj_count_diag = jj_count_diag_array[0]; jj_count_offd = jj_count_offd_array[0]; for (i1 = 1; i1 < ii; i1++) { jj_count_diag += jj_count_diag_array[i1]; jj_count_offd += jj_count_offd_array[i1]; } for (i1 = ns; i1 < ne; i1++) { (*C_diag_i)[i1] += jj_count_diag; (*C_offd_i)[i1] += jj_count_offd; } } else { (*C_diag_i)[num_rows_diag_A] = 0; (*C_offd_i)[num_rows_diag_A] = 0; for (i1 = 0; i1 < num_threads; i1++) { (*C_diag_i)[num_rows_diag_A] += jj_count_diag_array[i1]; (*C_offd_i)[num_rows_diag_A] += jj_count_offd_array[i1]; } } } /* end parallel loop */ /*----------------------------------------------------------------------- * Allocate C_diag_data and C_diag_j arrays. * Allocate C_offd_data and C_offd_j arrays. *-----------------------------------------------------------------------*/ *C_diag_size = (*C_diag_i)[num_rows_diag_A]; *C_offd_size = (*C_offd_i)[num_rows_diag_A]; hypre_TFree(jj_count_diag_array); hypre_TFree(jj_count_offd_array); /* End of First Pass */ } /*-------------------------------------------------------------------------- * hypre_ParMatmul : multiplies two ParCSRMatrices A and B and returns * the product in ParCSRMatrix C * Note that C does not own the partitionings since its row_starts * is owned by A and col_starts by B. 
*--------------------------------------------------------------------------*/ hypre_ParCSRMatrix *hypre_ParMatmul( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B ) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MATMUL] -= hypre_MPI_Wtime(); #endif MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int *row_starts_A = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); HYPRE_Complex *B_diag_data = hypre_CSRMatrixData(B_diag); HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag); HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag); hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_Int *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); HYPRE_Complex *B_offd_data = hypre_CSRMatrixData(B_offd); HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd); HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd); HYPRE_Int first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B); HYPRE_Int last_col_diag_B; HYPRE_Int *col_starts_B = hypre_ParCSRMatrixColStarts(B); HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag); HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); hypre_ParCSRMatrix *C; HYPRE_Int *col_map_offd_C; HYPRE_Int *map_B_to_C=NULL; hypre_CSRMatrix *C_diag; HYPRE_Complex *C_diag_data; HYPRE_Int *C_diag_i; HYPRE_Int *C_diag_j; hypre_CSRMatrix *C_offd; HYPRE_Complex *C_offd_data=NULL; HYPRE_Int *C_offd_i=NULL; HYPRE_Int *C_offd_j=NULL; HYPRE_Int C_diag_size; HYPRE_Int C_offd_size; HYPRE_Int num_cols_offd_C = 0; hypre_CSRMatrix *Bs_ext; HYPRE_Complex *Bs_ext_data; HYPRE_Int *Bs_ext_i; HYPRE_Int *Bs_ext_j; HYPRE_Complex *B_ext_diag_data; HYPRE_Int *B_ext_diag_i; HYPRE_Int *B_ext_diag_j; HYPRE_Int B_ext_diag_size; HYPRE_Complex *B_ext_offd_data; HYPRE_Int *B_ext_offd_i; HYPRE_Int *B_ext_offd_j; HYPRE_Int B_ext_offd_size; HYPRE_Int n_rows_A, n_cols_A; HYPRE_Int n_rows_B, n_cols_B; HYPRE_Int allsquare = 0; HYPRE_Int num_procs; HYPRE_Int *my_diag_array; HYPRE_Int *my_offd_array; HYPRE_Int max_num_threads; HYPRE_Complex zero = 0.0; n_rows_A = hypre_ParCSRMatrixGlobalNumRows(A); n_cols_A = hypre_ParCSRMatrixGlobalNumCols(A); n_rows_B = hypre_ParCSRMatrixGlobalNumRows(B); n_cols_B = hypre_ParCSRMatrixGlobalNumCols(B); max_num_threads = hypre_NumThreads(); my_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads); my_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads); if (n_cols_A != n_rows_B || num_cols_diag_A != num_rows_diag_B) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! Incompatible matrix dimensions!\n"); return NULL; } if ( num_rows_diag_A==num_cols_diag_B) allsquare = 1; /*----------------------------------------------------------------------- * Extract B_ext, i.e. 
portion of B that is stored on neighbor procs * and needed locally for matrix matrix product *-----------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm, &num_procs); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime(); #endif if (num_procs > 1) { /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings within * hypre_ParCSRMatrixExtractBExt *--------------------------------------------------------------------*/ Bs_ext = hypre_ParCSRMatrixExtractBExt(B,A,1); Bs_ext_data = hypre_CSRMatrixData(Bs_ext); Bs_ext_i = hypre_CSRMatrixI(Bs_ext); Bs_ext_j = hypre_CSRMatrixJ(Bs_ext); } B_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A+1); B_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A+1); B_ext_diag_size = 0; B_ext_offd_size = 0; last_col_diag_B = first_col_diag_B + num_cols_diag_B -1; #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_UnorderedIntSet set; #pragma omp parallel { HYPRE_Int size, rest, ii; HYPRE_Int ns, ne; HYPRE_Int i1, i, j; HYPRE_Int my_offd_size, my_diag_size; HYPRE_Int cnt_offd, cnt_diag; HYPRE_Int num_threads = hypre_NumActiveThreads(); size = num_cols_offd_A/num_threads; rest = num_cols_offd_A - size*num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } my_diag_size = 0; my_offd_size = 0; for (i=ns; i < ne; i++) { B_ext_diag_i[i] = my_diag_size; B_ext_offd_i[i] = my_offd_size; for (j=Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++) if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) my_offd_size++; else my_diag_size++; } my_diag_array[ii] = my_diag_size; my_offd_array[ii] = my_offd_size; #pragma omp barrier if (ii) { my_diag_size = my_diag_array[0]; my_offd_size = my_offd_array[0]; for (i1 = 1; i1 < ii; i1++) { my_diag_size += my_diag_array[i1]; my_offd_size += my_offd_array[i1]; } for (i1 = ns; i1 < ne; i1++) { B_ext_diag_i[i1] += my_diag_size; B_ext_offd_i[i1] += my_offd_size; } } else { B_ext_diag_size = 0; B_ext_offd_size = 0; for (i1 = 0; i1 < num_threads; i1++) { B_ext_diag_size += my_diag_array[i1]; B_ext_offd_size += my_offd_array[i1]; } B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size; B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size; if (B_ext_diag_size) { B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size); B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size); } if (B_ext_offd_size) { B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size); B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size); } hypre_UnorderedIntSetCreate(&set, B_ext_offd_size + num_cols_offd_B, 16*hypre_NumThreads()); } #pragma omp barrier cnt_offd = B_ext_offd_i[ns]; cnt_diag = B_ext_diag_i[ns]; for (i=ns; i < ne; i++) { for (j=Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++) if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) { hypre_UnorderedIntSetPut(&set, Bs_ext_j[j]); B_ext_offd_j[cnt_offd] = Bs_ext_j[j]; B_ext_offd_data[cnt_offd++] = Bs_ext_data[j]; } else { B_ext_diag_j[cnt_diag] = Bs_ext_j[j] - first_col_diag_B; B_ext_diag_data[cnt_diag++] = Bs_ext_data[j]; } } HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_B); for (i = i_begin; i < i_end; i++) { hypre_UnorderedIntSetPut(&set, col_map_offd_B[i]); } } /* omp parallel */ if (num_procs > 1) { hypre_CSRMatrixDestroy(Bs_ext); Bs_ext = NULL; } col_map_offd_C = 
hypre_UnorderedIntSetCopyToArray(&set, &num_cols_offd_C); hypre_UnorderedIntSetDestroy(&set); hypre_UnorderedIntMap col_map_offd_C_inverse; hypre_sort_and_create_inverse_map(col_map_offd_C, num_cols_offd_C, &col_map_offd_C, &col_map_offd_C_inverse); HYPRE_Int i, j; #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE for (i = 0; i < num_cols_offd_A; i++) for (j=B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++) B_ext_offd_j[j] = hypre_UnorderedIntMapGet(&col_map_offd_C_inverse, B_ext_offd_j[j]); if (num_cols_offd_C) { hypre_UnorderedIntMapDestroy(&col_map_offd_C_inverse); } hypre_TFree(my_diag_array); hypre_TFree(my_offd_array); if (num_cols_offd_B) { HYPRE_Int i; map_B_to_C = hypre_CTAlloc(HYPRE_Int,num_cols_offd_B); #pragma omp parallel private(i) { HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_C); HYPRE_Int cnt; if (i_end > i_begin) { cnt = hypre_LowerBound(col_map_offd_B, col_map_offd_B + num_cols_offd_B, col_map_offd_C[i_begin]) - col_map_offd_B; } for (i = i_begin; i < i_end && cnt < num_cols_offd_B; i++) { if (col_map_offd_C[i] == col_map_offd_B[cnt]) { map_B_to_C[cnt++] = i; } } } } #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ HYPRE_Int *temp; #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int size, rest, ii; HYPRE_Int ns, ne; HYPRE_Int i1, i, j; HYPRE_Int my_offd_size, my_diag_size; HYPRE_Int cnt_offd, cnt_diag; HYPRE_Int num_threads = hypre_NumActiveThreads(); size = num_cols_offd_A/num_threads; rest = num_cols_offd_A - size*num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } my_diag_size = 0; my_offd_size = 0; for (i=ns; i < ne; i++) { B_ext_diag_i[i] = my_diag_size; B_ext_offd_i[i] = my_offd_size; for (j=Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++) if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) my_offd_size++; else my_diag_size++; } my_diag_array[ii] = my_diag_size; my_offd_array[ii] = my_offd_size; #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii) { my_diag_size = my_diag_array[0]; my_offd_size = my_offd_array[0]; for (i1 = 1; i1 < ii; i1++) { my_diag_size += my_diag_array[i1]; my_offd_size += my_offd_array[i1]; } for (i1 = ns; i1 < ne; i1++) { B_ext_diag_i[i1] += my_diag_size; B_ext_offd_i[i1] += my_offd_size; } } else { B_ext_diag_size = 0; B_ext_offd_size = 0; for (i1 = 0; i1 < num_threads; i1++) { B_ext_diag_size += my_diag_array[i1]; B_ext_offd_size += my_offd_array[i1]; } B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size; B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size; if (B_ext_diag_size) { B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size); B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size); } if (B_ext_offd_size) { B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size); B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size); } if (B_ext_offd_size || num_cols_offd_B) temp = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size+num_cols_offd_B); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif cnt_offd = B_ext_offd_i[ns]; cnt_diag = B_ext_diag_i[ns]; for (i=ns; i < ne; i++) { for (j=Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++) if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) { temp[cnt_offd] = Bs_ext_j[j]; B_ext_offd_j[cnt_offd] = Bs_ext_j[j]; B_ext_offd_data[cnt_offd++] = Bs_ext_data[j]; } else { B_ext_diag_j[cnt_diag] = Bs_ext_j[j] - first_col_diag_B; B_ext_diag_data[cnt_diag++] = Bs_ext_data[j]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii == 0) 
{ HYPRE_Int cnt; if (num_procs > 1) { hypre_CSRMatrixDestroy(Bs_ext); Bs_ext = NULL; } cnt = 0; if (B_ext_offd_size || num_cols_offd_B) { cnt = B_ext_offd_size; for (i=0; i < num_cols_offd_B; i++) temp[cnt++] = col_map_offd_B[i]; if (cnt) { HYPRE_Int value; hypre_qsort0(temp, 0, cnt-1); num_cols_offd_C = 1; value = temp[0]; for (i=1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_C++] = value; } } } if (num_cols_offd_C) col_map_offd_C = hypre_CTAlloc(HYPRE_Int,num_cols_offd_C); for (i=0; i < num_cols_offd_C; i++) col_map_offd_C[i] = temp[i]; hypre_TFree(temp); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (i=ns; i < ne; i++) for (j=B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++) B_ext_offd_j[j] = hypre_BinarySearch(col_map_offd_C, B_ext_offd_j[j], num_cols_offd_C); } /* end parallel region */ hypre_TFree(my_diag_array); hypre_TFree(my_offd_array); if (num_cols_offd_B) { HYPRE_Int i, cnt; map_B_to_C = hypre_CTAlloc(HYPRE_Int,num_cols_offd_B); cnt = 0; for (i=0; i < num_cols_offd_C; i++) if (col_map_offd_C[i] == col_map_offd_B[cnt]) { map_B_to_C[cnt++] = i; if (cnt == num_cols_offd_B) break; } } #endif /* !HYPRE_CONCURRENT_HOPSCOTCH */ #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime(); #endif hypre_ParMatmul_RowSizes( /*&C_diag_i, &C_offd_i, &B_marker,*/ &C_diag_i, &C_offd_i, A_diag_i, A_diag_j, A_offd_i, A_offd_j, B_diag_i, B_diag_j, B_offd_i, B_offd_j, B_ext_diag_i, B_ext_diag_j, B_ext_offd_i, B_ext_offd_j, map_B_to_C, &C_diag_size, &C_offd_size, num_rows_diag_A, num_cols_offd_A, allsquare, num_cols_diag_B, num_cols_offd_B, num_cols_offd_C ); /*----------------------------------------------------------------------- * Allocate C_diag_data and C_diag_j arrays. * Allocate C_offd_data and C_offd_j arrays. *-----------------------------------------------------------------------*/ last_col_diag_B = first_col_diag_B + num_cols_diag_B - 1; C_diag_data = hypre_CTAlloc(HYPRE_Complex, C_diag_size); C_diag_j = hypre_CTAlloc(HYPRE_Int, C_diag_size); if (C_offd_size) { C_offd_data = hypre_CTAlloc(HYPRE_Complex, C_offd_size); C_offd_j = hypre_CTAlloc(HYPRE_Int, C_offd_size); } /*----------------------------------------------------------------------- * Second Pass: Fill in C_diag_data and C_diag_j. * Second Pass: Fill in C_offd_data and C_offd_j. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Initialize some stuff. *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int *B_marker = NULL; HYPRE_Int ns, ne, size, rest, ii; HYPRE_Int i1, i2, i3, jj2, jj3; HYPRE_Int jj_row_begin_diag, jj_count_diag; HYPRE_Int jj_row_begin_offd, jj_count_offd; HYPRE_Int num_threads; HYPRE_Complex a_entry; /*, a_b_product;*/ ii = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); size = num_rows_diag_A/num_threads; rest = num_rows_diag_A - size*num_threads; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } jj_count_diag = C_diag_i[ns]; jj_count_offd = C_offd_i[ns]; if (num_cols_diag_B || num_cols_offd_C) B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B+num_cols_offd_C); for (i1 = 0; i1 < num_cols_diag_B+num_cols_offd_C; i1++) B_marker[i1] = -1; /*----------------------------------------------------------------------- * Loop over interior c-points. 
*-----------------------------------------------------------------------*/ for (i1 = ns; i1 < ne; i1++) { /*-------------------------------------------------------------------- * Create diagonal entry, C_{i1,i1} *--------------------------------------------------------------------*/ jj_row_begin_diag = jj_count_diag; jj_row_begin_offd = jj_count_offd; if ( allsquare ) { B_marker[i1] = jj_count_diag; C_diag_data[jj_count_diag] = zero; C_diag_j[jj_count_diag] = i1; jj_count_diag++; } /*----------------------------------------------------------------- * Loop over entries in row i1 of A_offd. *-----------------------------------------------------------------*/ if (num_cols_offd_A) { for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++) { i2 = A_offd_j[jj2]; a_entry = A_offd_data[jj2]; /*----------------------------------------------------------- * Loop over entries in row i2 of B_ext. *-----------------------------------------------------------*/ for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2+1]; jj3++) { i3 = num_cols_diag_B+B_ext_offd_j[jj3]; /*-------------------------------------------------------- * Check B_marker to see that C_{i1,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_offd) { B_marker[i3] = jj_count_offd; C_offd_data[jj_count_offd] = a_entry*B_ext_offd_data[jj3]; C_offd_j[jj_count_offd] = i3-num_cols_diag_B; jj_count_offd++; } else C_offd_data[B_marker[i3]] += a_entry*B_ext_offd_data[jj3]; } for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2+1]; jj3++) { i3 = B_ext_diag_j[jj3]; if (B_marker[i3] < jj_row_begin_diag) { B_marker[i3] = jj_count_diag; C_diag_data[jj_count_diag] = a_entry*B_ext_diag_data[jj3]; C_diag_j[jj_count_diag] = i3; jj_count_diag++; } else C_diag_data[B_marker[i3]] += a_entry*B_ext_diag_data[jj3]; } } } /*----------------------------------------------------------------- * Loop over entries in row i1 of A_diag. *-----------------------------------------------------------------*/ for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++) { i2 = A_diag_j[jj2]; a_entry = A_diag_data[jj2]; /*----------------------------------------------------------- * Loop over entries in row i2 of B_diag. *-----------------------------------------------------------*/ for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2+1]; jj3++) { i3 = B_diag_j[jj3]; /*-------------------------------------------------------- * Check B_marker to see that C_{i1,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_diag) { B_marker[i3] = jj_count_diag; C_diag_data[jj_count_diag] = a_entry*B_diag_data[jj3]; C_diag_j[jj_count_diag] = i3; jj_count_diag++; } else { C_diag_data[B_marker[i3]] += a_entry*B_diag_data[jj3]; } } if (num_cols_offd_B) { for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2+1]; jj3++) { i3 = num_cols_diag_B+map_B_to_C[B_offd_j[jj3]]; /*-------------------------------------------------------- * Check B_marker to see that C_{i1,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. 
*--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_offd) { B_marker[i3] = jj_count_offd; C_offd_data[jj_count_offd] = a_entry*B_offd_data[jj3]; C_offd_j[jj_count_offd] = i3-num_cols_diag_B; jj_count_offd++; } else { C_offd_data[B_marker[i3]] += a_entry*B_offd_data[jj3]; } } } } } hypre_TFree(B_marker); } /*end parallel region */ C = hypre_ParCSRMatrixCreate(comm, n_rows_A, n_cols_B, row_starts_A, col_starts_B, num_cols_offd_C, C_diag_size, C_offd_size); /* Note that C does not own the partitionings */ hypre_ParCSRMatrixSetRowStartsOwner(C,0); hypre_ParCSRMatrixSetColStartsOwner(C,0); C_diag = hypre_ParCSRMatrixDiag(C); hypre_CSRMatrixData(C_diag) = C_diag_data; hypre_CSRMatrixI(C_diag) = C_diag_i; hypre_CSRMatrixJ(C_diag) = C_diag_j; C_offd = hypre_ParCSRMatrixOffd(C); hypre_CSRMatrixI(C_offd) = C_offd_i; hypre_ParCSRMatrixOffd(C) = C_offd; if (num_cols_offd_C) { hypre_CSRMatrixData(C_offd) = C_offd_data; hypre_CSRMatrixJ(C_offd) = C_offd_j; hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C; } /*----------------------------------------------------------------------- * Free various arrays *-----------------------------------------------------------------------*/ hypre_TFree(B_ext_diag_i); if (B_ext_diag_size) { hypre_TFree(B_ext_diag_j); hypre_TFree(B_ext_diag_data); } hypre_TFree(B_ext_offd_i); if (B_ext_offd_size) { hypre_TFree(B_ext_offd_j); hypre_TFree(B_ext_offd_data); } if (num_cols_offd_B) hypre_TFree(map_B_to_C); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MATMUL] += hypre_MPI_Wtime(); #endif return C; } /* The following function was formerly part of hypre_ParCSRMatrixExtractBExt but the code was removed so it can be used for a corresponding function for Boolean matrices JSP: to allow communication overlapping, it returns comm_handle_idx and comm_handle_data. Before accessing B, they should be destroyed (including send_data contained in the comm_handle). 
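   In practice the caller may overlap independent local work with the neighbor
   exchange: the returned B_ext_i row-pointer array is completed inside this
   routine, but the column and value arrays are only valid once both returned
   handles have been destroyed.  A sketch of the intended sequence (names as in
   hypre_ParCSRMatrixExtractBExt_Arrays further below, which does the same
   without the overlapping work):

      hypre_ParCSRCommHandle *comm_handle_idx, *comm_handle_data;
      hypre_ParCSRMatrixExtractBExt_Arrays_Overlap(&B_ext_i, &B_ext_j, &B_ext_data,
         ..., &comm_handle_idx, &comm_handle_data, NULL, NULL, 0, 0);
      ... purely local work that does not read B_ext_j or B_ext_data ...
      send_idx  = (HYPRE_Int *)  comm_handle_idx->send_data;
      hypre_ParCSRCommHandleDestroy(comm_handle_idx);    completes the index exchange
      hypre_TFree(send_idx);
      send_data = (HYPRE_Real *) comm_handle_data->send_data;
      hypre_ParCSRCommHandleDestroy(comm_handle_data);   completes the value exchange
      hypre_TFree(send_data);
      only now read B_ext_j and B_ext_data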
*/ void hypre_ParCSRMatrixExtractBExt_Arrays_Overlap( HYPRE_Int ** pB_ext_i, HYPRE_Int ** pB_ext_j, HYPRE_Complex ** pB_ext_data, HYPRE_Int ** pB_ext_row_map, HYPRE_Int * num_nonzeros, HYPRE_Int data, HYPRE_Int find_row_map, MPI_Comm comm, hypre_ParCSRCommPkg * comm_pkg, HYPRE_Int num_cols_B, HYPRE_Int num_recvs, HYPRE_Int num_sends, HYPRE_Int first_col_diag, HYPRE_Int * row_starts, HYPRE_Int * recv_vec_starts, HYPRE_Int * send_map_starts, HYPRE_Int * send_map_elmts, HYPRE_Int * diag_i, HYPRE_Int * diag_j, HYPRE_Int * offd_i, HYPRE_Int * offd_j, HYPRE_Int * col_map_offd, HYPRE_Real * diag_data, HYPRE_Real * offd_data, hypre_ParCSRCommHandle **comm_handle_idx, hypre_ParCSRCommHandle **comm_handle_data, HYPRE_Int *CF_marker, HYPRE_Int *CF_marker_offd, HYPRE_Int skip_fine, /* 1 if only coarse points are needed */ HYPRE_Int skip_same_sign /* 1 if only points that have the same sign are needed */ // extended based long range interpolation: skip_fine = 1, skip_same_sign = 0 for S matrix, skip_fine = 1, skip_same_sign = 1 for A matrix // other interpolation: skip_fine = 0, skip_same_sign = 0 ) { hypre_ParCSRCommHandle *comm_handle, *row_map_comm_handle = NULL; hypre_ParCSRCommPkg *tmp_comm_pkg; HYPRE_Int *B_int_i; HYPRE_Int *B_int_j; HYPRE_Int *B_ext_i; HYPRE_Int * B_ext_j; HYPRE_Complex * B_ext_data; HYPRE_Complex * B_int_data; HYPRE_Int * B_int_row_map; HYPRE_Int * B_ext_row_map; HYPRE_Int num_procs, my_id; HYPRE_Int *jdata_recv_vec_starts; HYPRE_Int *jdata_send_map_starts; HYPRE_Int i, j, k; HYPRE_Int start_index; /*HYPRE_Int jrow;*/ HYPRE_Int num_rows_B_ext; HYPRE_Int *prefix_sum_workspace; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION HYPRE_Int first_row_index = row_starts[0]; #else HYPRE_Int first_row_index = row_starts[my_id]; HYPRE_Int *send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); #endif num_rows_B_ext = recv_vec_starts[num_recvs]; if ( num_rows_B_ext < 0 ) { /* no B_ext, no communication */ *pB_ext_i = NULL; *pB_ext_j = NULL; if ( data ) *pB_ext_data = NULL; if ( find_row_map ) *pB_ext_row_map = NULL; *num_nonzeros = 0; return; }; B_int_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends]+1); B_ext_i = hypre_CTAlloc(HYPRE_Int, num_rows_B_ext+1); *pB_ext_i = B_ext_i; if ( find_row_map ) { B_int_row_map = hypre_CTAlloc( HYPRE_Int, send_map_starts[num_sends]+1 ); B_ext_row_map = hypre_CTAlloc( HYPRE_Int, num_rows_B_ext+1 ); *pB_ext_row_map = B_ext_row_map; }; /*-------------------------------------------------------------------------- * generate B_int_i through adding number of row-elements of offd and diag * for corresponding rows. 
B_int_i[j+1] contains the number of elements of * a row j (which is determined through send_map_elmts) *--------------------------------------------------------------------------*/ jdata_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1); jdata_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1); jdata_send_map_starts[0] = B_int_i[0] = 0; /*HYPRE_Int prefix_sum_workspace[(hypre_NumThreads() + 1)*num_sends];*/ prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, (hypre_NumThreads() + 1)*num_sends); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,j,k) #endif { /*HYPRE_Int counts[num_sends];*/ HYPRE_Int *counts; counts = hypre_TAlloc(HYPRE_Int, num_sends); for (i=0; i < num_sends; i++) { HYPRE_Int j_begin, j_end; hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]); j_begin += send_map_starts[i]; j_end += send_map_starts[i]; HYPRE_Int count = 0; if (skip_fine && skip_same_sign) { #ifndef HYPRE_NO_GLOBAL_PARTITION HYPRE_Int send_proc = send_procs[i]; HYPRE_Int send_proc_first_row = row_starts[send_proc]; HYPRE_Int send_proc_last_row = row_starts[send_proc + 1]; #endif for (j = j_begin; j < j_end; j++) { HYPRE_Int jrow = send_map_elmts[j]; HYPRE_Int len = 0; if (diag_data[diag_i[jrow]] >= 0) { for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++) { if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0) len++; } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { #ifdef HYPRE_NO_GLOBAL_PARTITION if (offd_data[k] < 0) len++; #else HYPRE_Int c = offd_j[k]; HYPRE_Int c_global = col_map_offd[c]; if (offd_data[k] < 0 && (CF_marker_offd[c] >= 0 || (c_global >= send_proc_first_row && c_global < send_proc_last_row))) len++; #endif } } else { for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++) { if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0) len++; } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { #ifdef HYPRE_NO_GLOBAL_PARTITION if (offd_data[k] > 0) len++; #else HYPRE_Int c = offd_j[k]; HYPRE_Int c_global = col_map_offd[c]; if (offd_data[k] > 0 && (CF_marker_offd[c] >= 0 || (c_global >= send_proc_first_row && c_global < send_proc_last_row))) len++; #endif } } B_int_i[j + 1] = len; count += len; } } else if (skip_fine) { for (j = j_begin; j < j_end; j++) { HYPRE_Int jrow = send_map_elmts[j]; HYPRE_Int len = 0; for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++) { if (CF_marker[diag_j[k]] >= 0) len++; } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { if (CF_marker_offd[offd_j[k]] >= 0) len++; } B_int_i[j + 1] = len; count += len; } } else { for (j = j_begin; j < j_end; j++) { HYPRE_Int jrow = send_map_elmts[j]; HYPRE_Int len = diag_i[jrow + 1] - diag_i[jrow]; len += offd_i[jrow + 1] - offd_i[jrow]; B_int_i[j + 1] = len; count += len; } } if (find_row_map) { for (j = j_begin; j < j_end; j++) { HYPRE_Int jrow = send_map_elmts[j]; B_int_row_map[j] = jrow + first_row_index; } } counts[i] = count; } hypre_prefix_sum_multiple(counts, jdata_send_map_starts + 1, num_sends, prefix_sum_workspace); #ifdef HYPRE_USING_OPENMP #pragma omp master #endif { for (i = 1; i < num_sends; i++) { jdata_send_map_starts[i + 1] += jdata_send_map_starts[i]; } /*-------------------------------------------------------------------------- * initialize communication *--------------------------------------------------------------------------*/ comm_handle = hypre_ParCSRCommHandleCreate(11,comm_pkg, &B_int_i[1],&(B_ext_i[1]) ); if ( find_row_map ) { /* scatter/gather B_int row numbers to form array of B_ext row numbers */ row_map_comm_handle = hypre_ParCSRCommHandleCreate 
(11,comm_pkg, B_int_row_map, B_ext_row_map ); } B_int_j = hypre_TAlloc(HYPRE_Int, jdata_send_map_starts[num_sends]); if (data) B_int_data = hypre_TAlloc(HYPRE_Complex, jdata_send_map_starts[num_sends]); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (i=0; i < num_sends; i++) { HYPRE_Int j_begin, j_end; hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]); j_begin += send_map_starts[i]; j_end += send_map_starts[i]; HYPRE_Int count = counts[i] + jdata_send_map_starts[i]; if (data) { if (skip_same_sign && skip_fine) { #ifndef HYPRE_NO_GLOBAL_PARTITION HYPRE_Int send_proc = send_procs[i]; HYPRE_Int send_proc_first_row = row_starts[send_proc]; HYPRE_Int send_proc_last_row = row_starts[send_proc + 1]; #endif for (j = j_begin; j < j_end; j++) { HYPRE_Int jrow = send_map_elmts[j]; /*HYPRE_Int count_begin = count;*/ if (diag_data[diag_i[jrow]] >= 0) { for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++) { if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0) { B_int_j[count] = diag_j[k]+first_col_diag; B_int_data[count] = diag_data[k]; count++; } } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { HYPRE_Int c = offd_j[k]; HYPRE_Int c_global = col_map_offd[c]; #ifdef HYPRE_NO_GLOBAL_PARTITION if (offd_data[k] < 0) #else if (offd_data[k] < 0 && (CF_marker_offd[c] >= 0 || (c_global >= send_proc_first_row && c_global < send_proc_last_row))) #endif { B_int_j[count] = c_global; B_int_data[count] = offd_data[k]; count++; } } } else { for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++) { if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0) { B_int_j[count] = diag_j[k]+first_col_diag; B_int_data[count] = diag_data[k]; count++; } } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { HYPRE_Int c = offd_j[k]; HYPRE_Int c_global = col_map_offd[c]; #ifdef HYPRE_NO_GLOBAL_PARTITION if (offd_data[k] > 0) #else if (offd_data[k] > 0 && (CF_marker_offd[c] >= 0 || (c_global >= send_proc_first_row && c_global < send_proc_last_row))) #endif { B_int_j[count] = c_global; B_int_data[count] = offd_data[k]; count++; } } } } } else { for (j = j_begin; j < j_end; ++j) { HYPRE_Int jrow = send_map_elmts[j]; for (k=diag_i[jrow]; k < diag_i[jrow+1]; k++) { B_int_j[count] = diag_j[k]+first_col_diag; B_int_data[count] = diag_data[k]; count++; } for (k=offd_i[jrow]; k < offd_i[jrow+1]; k++) { B_int_j[count] = col_map_offd[offd_j[k]]; B_int_data[count] = offd_data[k]; count++; } } } } // data else { if (skip_fine) { for (j = j_begin; j < j_end; j++) { HYPRE_Int jrow = send_map_elmts[j]; for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++) { if (CF_marker[diag_j[k]] >= 0) { B_int_j[count] = diag_j[k] + first_col_diag; count++; } } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { if (CF_marker_offd[offd_j[k]] >= 0) { B_int_j[count] = col_map_offd[offd_j[k]]; count++; } } } } else { for (j = j_begin; j < j_end; ++j) { HYPRE_Int jrow = send_map_elmts[j]; for (k=diag_i[jrow]; k < diag_i[jrow+1]; k++) { B_int_j[count] = diag_j[k]+first_col_diag; count++; } for (k=offd_i[jrow]; k < offd_i[jrow+1]; k++) { B_int_j[count] = col_map_offd[offd_j[k]]; count++; } } } } // !data } /* for each send target */ hypre_TFree(counts); } /* omp parallel. 
JSP: this takes most of time in this function */ hypre_TFree(prefix_sum_workspace); tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg,1); hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm; hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgSendProcs(comm_pkg); hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgRecvProcs(comm_pkg); hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = jdata_send_map_starts; hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; /*-------------------------------------------------------------------------- * after communication exchange B_ext_i[j+1] contains the number of elements * of a row j ! * evaluate B_ext_i and compute *num_nonzeros for B_ext *--------------------------------------------------------------------------*/ for (i=0; i < num_recvs; i++) for (j = recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++) B_ext_i[j+1] += B_ext_i[j]; *num_nonzeros = B_ext_i[num_rows_B_ext]; *pB_ext_j = hypre_TAlloc(HYPRE_Int, *num_nonzeros); B_ext_j = *pB_ext_j; if (data) { *pB_ext_data = hypre_TAlloc(HYPRE_Complex, *num_nonzeros); B_ext_data = *pB_ext_data; }; for (i=0; i < num_recvs; i++) { start_index = B_ext_i[recv_vec_starts[i]]; *num_nonzeros = B_ext_i[recv_vec_starts[i+1]]-start_index; jdata_recv_vec_starts[i+1] = B_ext_i[recv_vec_starts[i+1]]; } hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = jdata_recv_vec_starts; *comm_handle_idx = hypre_ParCSRCommHandleCreate(11,tmp_comm_pkg,B_int_j,B_ext_j); if (data) { *comm_handle_data = hypre_ParCSRCommHandleCreate(1,tmp_comm_pkg,B_int_data, B_ext_data); } if (row_map_comm_handle) { hypre_ParCSRCommHandleDestroy(row_map_comm_handle); row_map_comm_handle = NULL; } hypre_TFree(jdata_send_map_starts); hypre_TFree(jdata_recv_vec_starts); hypre_TFree(tmp_comm_pkg); hypre_TFree(B_int_i); if ( find_row_map ) hypre_TFree(B_int_row_map); /* end generic part */ } void hypre_ParCSRMatrixExtractBExt_Arrays( HYPRE_Int ** pB_ext_i, HYPRE_Int ** pB_ext_j, HYPRE_Complex ** pB_ext_data, HYPRE_Int ** pB_ext_row_map, HYPRE_Int * num_nonzeros, HYPRE_Int data, HYPRE_Int find_row_map, MPI_Comm comm, hypre_ParCSRCommPkg * comm_pkg, HYPRE_Int num_cols_B, HYPRE_Int num_recvs, HYPRE_Int num_sends, HYPRE_Int first_col_diag, HYPRE_Int * row_starts, HYPRE_Int * recv_vec_starts, HYPRE_Int * send_map_starts, HYPRE_Int * send_map_elmts, HYPRE_Int * diag_i, HYPRE_Int * diag_j, HYPRE_Int * offd_i, HYPRE_Int * offd_j, HYPRE_Int * col_map_offd, HYPRE_Real * diag_data, HYPRE_Real * offd_data ) { hypre_ParCSRCommHandle *comm_handle_idx, *comm_handle_data; hypre_ParCSRMatrixExtractBExt_Arrays_Overlap( pB_ext_i, pB_ext_j, pB_ext_data, pB_ext_row_map, num_nonzeros, data, find_row_map, comm, comm_pkg, num_cols_B, num_recvs, num_sends, first_col_diag, row_starts, recv_vec_starts, send_map_starts, send_map_elmts, diag_i, diag_j, offd_i, offd_j, col_map_offd, diag_data, offd_data, &comm_handle_idx, &comm_handle_data, NULL, NULL, 0, 0); HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_idx->send_data; hypre_ParCSRCommHandleDestroy(comm_handle_idx); hypre_TFree(send_idx); if (data) { HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_data->send_data; hypre_ParCSRCommHandleDestroy(comm_handle_data); hypre_TFree(send_data); } } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixExtractBExt : extracts rows from B which are located on * other processors and needed for multiplication with A 
locally. The rows * are returned as CSRMatrix. *--------------------------------------------------------------------------*/ hypre_CSRMatrix * hypre_ParCSRMatrixExtractBExt_Overlap( hypre_ParCSRMatrix *B, hypre_ParCSRMatrix *A, HYPRE_Int data, hypre_ParCSRCommHandle **comm_handle_idx, hypre_ParCSRCommHandle **comm_handle_data, HYPRE_Int *CF_marker, HYPRE_Int *CF_marker_offd, HYPRE_Int skip_fine, HYPRE_Int skip_same_sign ) { MPI_Comm comm = hypre_ParCSRMatrixComm(B); HYPRE_Int first_col_diag = hypre_ParCSRMatrixFirstColDiag(B); /*HYPRE_Int first_row_index = hypre_ParCSRMatrixFirstRowIndex(B);*/ HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(B); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int num_recvs; HYPRE_Int *recv_vec_starts; HYPRE_Int num_sends; HYPRE_Int *send_map_starts; HYPRE_Int *send_map_elmts; hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(B); HYPRE_Int *diag_i = hypre_CSRMatrixI(diag); HYPRE_Int *diag_j = hypre_CSRMatrixJ(diag); HYPRE_Real *diag_data = hypre_CSRMatrixData(diag); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(B); HYPRE_Int *offd_i = hypre_CSRMatrixI(offd); HYPRE_Int *offd_j = hypre_CSRMatrixJ(offd); HYPRE_Real *offd_data = hypre_CSRMatrixData(offd); HYPRE_Int num_cols_B, num_nonzeros; HYPRE_Int num_rows_B_ext; hypre_CSRMatrix *B_ext; HYPRE_Int *B_ext_i; HYPRE_Int *B_ext_j; HYPRE_Complex *B_ext_data; HYPRE_Int *idummy; /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgCreate(A); } comm_pkg = hypre_ParCSRMatrixCommPkg(A); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); num_cols_B = hypre_ParCSRMatrixGlobalNumCols(B); num_rows_B_ext = recv_vec_starts[num_recvs]; hypre_ParCSRMatrixExtractBExt_Arrays_Overlap ( &B_ext_i, &B_ext_j, &B_ext_data, &idummy, &num_nonzeros, data, 0, comm, comm_pkg, num_cols_B, num_recvs, num_sends, first_col_diag, B->row_starts, recv_vec_starts, send_map_starts, send_map_elmts, diag_i, diag_j, offd_i, offd_j, col_map_offd, diag_data, offd_data, comm_handle_idx, comm_handle_data, CF_marker, CF_marker_offd, skip_fine, skip_same_sign ); B_ext = hypre_CSRMatrixCreate(num_rows_B_ext,num_cols_B,num_nonzeros); hypre_CSRMatrixI(B_ext) = B_ext_i; hypre_CSRMatrixJ(B_ext) = B_ext_j; if (data) hypre_CSRMatrixData(B_ext) = B_ext_data; return B_ext; } hypre_CSRMatrix * hypre_ParCSRMatrixExtractBExt( hypre_ParCSRMatrix *B, hypre_ParCSRMatrix *A, HYPRE_Int data ) { hypre_ParCSRCommHandle *comm_handle_idx, *comm_handle_data; hypre_CSRMatrix *B_ext = hypre_ParCSRMatrixExtractBExt_Overlap(B, A, data, &comm_handle_idx, &comm_handle_data, NULL, NULL, 0, 0); HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_idx->send_data; hypre_ParCSRCommHandleDestroy(comm_handle_idx); hypre_TFree(send_idx); if (data) { HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_data->send_data; hypre_ParCSRCommHandleDestroy(comm_handle_data); hypre_TFree(send_data); } return B_ext; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixTranspose *--------------------------------------------------------------------------*/ HYPRE_Int 
hypre_ParCSRMatrixTranspose( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **AT_ptr, HYPRE_Int data ) { hypre_ParCSRCommHandle *comm_handle; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int num_cols = hypre_ParCSRMatrixNumCols(A); HYPRE_Int first_row_index = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int *col_starts = hypre_ParCSRMatrixColStarts(A); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, num_recvs, num_cols_offd_AT; HYPRE_Int i, j, k, index, counter, j_row; HYPRE_Int value; hypre_ParCSRMatrix *AT; hypre_CSRMatrix *AT_diag; hypre_CSRMatrix *AT_offd; hypre_CSRMatrix *AT_tmp; HYPRE_Int first_row_index_AT, first_col_diag_AT; HYPRE_Int local_num_rows_AT, local_num_cols_AT; HYPRE_Int *AT_tmp_i; HYPRE_Int *AT_tmp_j; HYPRE_Complex *AT_tmp_data; HYPRE_Int *AT_buf_i; HYPRE_Int *AT_buf_j; HYPRE_Complex *AT_buf_data; HYPRE_Int *AT_offd_i; HYPRE_Int *AT_offd_j; HYPRE_Complex *AT_offd_data; HYPRE_Int *col_map_offd_AT; HYPRE_Int *row_starts_AT; HYPRE_Int *col_starts_AT; HYPRE_Int num_procs, my_id; HYPRE_Int *recv_procs; HYPRE_Int *send_procs; HYPRE_Int *recv_vec_starts; HYPRE_Int *send_map_starts; HYPRE_Int *send_map_elmts; HYPRE_Int *tmp_recv_vec_starts; HYPRE_Int *tmp_send_map_starts; hypre_ParCSRCommPkg *tmp_comm_pkg; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_cols_offd_AT = 0; counter = 0; AT_offd_j = NULL; AT_offd_data = NULL; col_map_offd_AT = NULL; /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (num_procs > 1) { hypre_CSRMatrixTranspose (A_offd, &AT_tmp, data); AT_tmp_i = hypre_CSRMatrixI(AT_tmp); AT_tmp_j = hypre_CSRMatrixJ(AT_tmp); if (data) AT_tmp_data = hypre_CSRMatrixData(AT_tmp); num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); AT_buf_i = hypre_CTAlloc(HYPRE_Int,send_map_starts[num_sends]); for (i=0; i < AT_tmp_i[num_cols_offd]; i++) AT_tmp_j[i] += first_row_index; for (i=0; i < num_cols_offd; i++) AT_tmp_i[i] = AT_tmp_i[i+1]-AT_tmp_i[i]; comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg, AT_tmp_i, AT_buf_i); } hypre_CSRMatrixTranspose( A_diag, &AT_diag, data); AT_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols+1); if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; tmp_send_map_starts = hypre_CTAlloc(HYPRE_Int,num_sends+1); tmp_recv_vec_starts = hypre_CTAlloc(HYPRE_Int,num_recvs+1); tmp_send_map_starts[0] = send_map_starts[0]; for (i=0; i < num_sends; i++) { tmp_send_map_starts[i+1] = tmp_send_map_starts[i]; for (j=send_map_starts[i]; j < send_map_starts[i+1]; j++) { tmp_send_map_starts[i+1] += AT_buf_i[j]; AT_offd_i[send_map_elmts[j]+1] += AT_buf_i[j]; } } for (i=0; i < num_cols; i++) AT_offd_i[i+1] += AT_offd_i[i]; 
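      /* AT_offd_i now holds cumulative row counts (row pointers) for the
         off-diagonal part of A^T; next, build the matching receive
         displacements for the transposed communication pattern */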
tmp_recv_vec_starts[0] = recv_vec_starts[0]; for (i=0; i < num_recvs; i++) { tmp_recv_vec_starts[i+1] = tmp_recv_vec_starts[i]; for (j=recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++) { tmp_recv_vec_starts[i+1] += AT_tmp_i[j]; } } tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg,1); hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm; hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = recv_procs; hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = send_procs; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = tmp_recv_vec_starts; hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = tmp_send_map_starts; AT_buf_j = hypre_CTAlloc(HYPRE_Int,tmp_send_map_starts[num_sends]); comm_handle = hypre_ParCSRCommHandleCreate(12, tmp_comm_pkg, AT_tmp_j, AT_buf_j); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; if (data) { AT_buf_data = hypre_CTAlloc(HYPRE_Complex,tmp_send_map_starts[num_sends]); comm_handle = hypre_ParCSRCommHandleCreate(2,tmp_comm_pkg,AT_tmp_data, AT_buf_data); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } hypre_TFree(tmp_recv_vec_starts); hypre_TFree(tmp_send_map_starts); hypre_TFree(tmp_comm_pkg); hypre_CSRMatrixDestroy(AT_tmp); if (AT_offd_i[num_cols]) { AT_offd_j = hypre_CTAlloc(HYPRE_Int, AT_offd_i[num_cols]); if (data) AT_offd_data = hypre_CTAlloc(HYPRE_Complex, AT_offd_i[num_cols]); } else { AT_offd_j = NULL; AT_offd_data = NULL; } counter = 0; for (i=0; i < num_sends; i++) { for (j=send_map_starts[i]; j < send_map_starts[i+1]; j++) { j_row = send_map_elmts[j]; index = AT_offd_i[j_row]; for (k=0; k < AT_buf_i[j]; k++) { if (data) AT_offd_data[index] = AT_buf_data[counter]; AT_offd_j[index++] = AT_buf_j[counter++]; } AT_offd_i[j_row] = index; } } for (i=num_cols; i > 0; i--) AT_offd_i[i] = AT_offd_i[i-1]; AT_offd_i[0] = 0; if (counter) { hypre_qsort0(AT_buf_j,0,counter-1); num_cols_offd_AT = 1; value = AT_buf_j[0]; for (i=1; i < counter; i++) { if (value < AT_buf_j[i]) { AT_buf_j[num_cols_offd_AT++] = AT_buf_j[i]; value = AT_buf_j[i]; } } } if (num_cols_offd_AT) col_map_offd_AT = hypre_CTAlloc(HYPRE_Int, num_cols_offd_AT); else col_map_offd_AT = NULL; for (i=0; i < num_cols_offd_AT; i++) col_map_offd_AT[i] = AT_buf_j[i]; hypre_TFree(AT_buf_i); hypre_TFree(AT_buf_j); if (data) hypre_TFree(AT_buf_data); for (i=0; i < counter; i++) AT_offd_j[i] = hypre_BinarySearch(col_map_offd_AT,AT_offd_j[i], num_cols_offd_AT); } AT_offd = hypre_CSRMatrixCreate(num_cols,num_cols_offd_AT,counter); hypre_CSRMatrixI(AT_offd) = AT_offd_i; hypre_CSRMatrixJ(AT_offd) = AT_offd_j; hypre_CSRMatrixData(AT_offd) = AT_offd_data; #ifdef HYPRE_NO_GLOBAL_PARTITION row_starts_AT = hypre_CTAlloc(HYPRE_Int, 2); for (i=0; i < 2; i++) row_starts_AT[i] = col_starts[i]; if (row_starts != col_starts) { col_starts_AT = hypre_CTAlloc(HYPRE_Int,2); for (i=0; i < 2; i++) col_starts_AT[i] = row_starts[i]; } else { col_starts_AT = row_starts_AT; } first_row_index_AT = row_starts_AT[0]; first_col_diag_AT = col_starts_AT[0]; local_num_rows_AT = row_starts_AT[1]-first_row_index_AT ; local_num_cols_AT = col_starts_AT[1]-first_col_diag_AT; #else row_starts_AT = hypre_CTAlloc(HYPRE_Int,num_procs+1); for (i=0; i < num_procs+1; i++) row_starts_AT[i] = col_starts[i]; if (row_starts != col_starts) { col_starts_AT = hypre_CTAlloc(HYPRE_Int,num_procs+1); for (i=0; i < num_procs+1; i++) col_starts_AT[i] = row_starts[i]; } else { col_starts_AT = row_starts_AT; } first_row_index_AT = row_starts_AT[my_id]; 
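   /* A^T takes its row partitioning from A's columns and vice versa */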
first_col_diag_AT = col_starts_AT[my_id]; local_num_rows_AT = row_starts_AT[my_id+1]-first_row_index_AT ; local_num_cols_AT = col_starts_AT[my_id+1]-first_col_diag_AT; #endif AT = hypre_CTAlloc(hypre_ParCSRMatrix,1); hypre_ParCSRMatrixComm(AT) = comm; hypre_ParCSRMatrixDiag(AT) = AT_diag; hypre_ParCSRMatrixOffd(AT) = AT_offd; hypre_ParCSRMatrixGlobalNumRows(AT) = hypre_ParCSRMatrixGlobalNumCols(A); hypre_ParCSRMatrixGlobalNumCols(AT) = hypre_ParCSRMatrixGlobalNumRows(A); hypre_ParCSRMatrixRowStarts(AT) = row_starts_AT; hypre_ParCSRMatrixColStarts(AT) = col_starts_AT; hypre_ParCSRMatrixColMapOffd(AT) = col_map_offd_AT; hypre_ParCSRMatrixFirstRowIndex(AT) = first_row_index_AT; hypre_ParCSRMatrixFirstColDiag(AT) = first_col_diag_AT; hypre_ParCSRMatrixLastRowIndex(AT) = first_row_index_AT + local_num_rows_AT - 1; hypre_ParCSRMatrixLastColDiag(AT) = first_col_diag_AT + local_num_cols_AT - 1; hypre_ParCSRMatrixOwnsData(AT) = 1; hypre_ParCSRMatrixOwnsRowStarts(AT) = 1; hypre_ParCSRMatrixOwnsColStarts(AT) = 1; if (row_starts_AT == col_starts_AT) hypre_ParCSRMatrixOwnsColStarts(AT) = 0; hypre_ParCSRMatrixCommPkg(AT) = NULL; hypre_ParCSRMatrixCommPkgT(AT) = NULL; hypre_ParCSRMatrixRowindices(AT) = NULL; hypre_ParCSRMatrixRowvalues(AT) = NULL; hypre_ParCSRMatrixGetrowactive(AT) = 0; *AT_ptr = AT; return ierr; } /* ----------------------------------------------------------------------------- * generate a parallel spanning tree (for Maxwell Equation) * G_csr is the node to edge connectivity matrix * ----------------------------------------------------------------------------- */ void hypre_ParCSRMatrixGenSpanningTree( hypre_ParCSRMatrix *G_csr, HYPRE_Int **indices, HYPRE_Int G_type ) { HYPRE_Int nrows_G, ncols_G, *G_diag_i, *G_diag_j, *GT_diag_mat, i, j, k, edge; HYPRE_Int *nodes_marked, *edges_marked, *queue, queue_tail, queue_head, node; HYPRE_Int mypid, nprocs, n_children, *children, nsends, *send_procs, *recv_cnts; HYPRE_Int nrecvs, *recv_procs, n_proc_array, *proc_array, *pgraph_i, *pgraph_j; HYPRE_Int parent, proc, proc2, node2, found, *t_indices, tree_size, *T_diag_i; HYPRE_Int *T_diag_j, *counts, offset; MPI_Comm comm; hypre_ParCSRCommPkg *comm_pkg; hypre_CSRMatrix *G_diag; /* fetch G matrix (G_type = 0 ==> node to edge) */ if (G_type == 0) { nrows_G = hypre_ParCSRMatrixGlobalNumRows(G_csr); ncols_G = hypre_ParCSRMatrixGlobalNumCols(G_csr); G_diag = hypre_ParCSRMatrixDiag(G_csr); G_diag_i = hypre_CSRMatrixI(G_diag); G_diag_j = hypre_CSRMatrixJ(G_diag); } else { nrows_G = hypre_ParCSRMatrixGlobalNumCols(G_csr); ncols_G = hypre_ParCSRMatrixGlobalNumRows(G_csr); G_diag = hypre_ParCSRMatrixDiag(G_csr); T_diag_i = hypre_CSRMatrixI(G_diag); T_diag_j = hypre_CSRMatrixJ(G_diag); counts = (HYPRE_Int *) malloc(nrows_G * sizeof(HYPRE_Int)); for (i = 0; i < nrows_G; i++) counts[i] = 0; for (i = 0; i < T_diag_i[ncols_G]; i++) counts[T_diag_j[i]]++; G_diag_i = (HYPRE_Int *) malloc((nrows_G+1) * sizeof(HYPRE_Int)); G_diag_j = (HYPRE_Int *) malloc(T_diag_i[ncols_G] * sizeof(HYPRE_Int)); G_diag_i[0] = 0; for (i = 1; i <= nrows_G; i++) G_diag_i[i] = G_diag_i[i-1] + counts[i-1]; for (i = 0; i < ncols_G; i++) { for (j = T_diag_i[i]; j < T_diag_i[i+1]; j++) { k = T_diag_j[j]; offset = G_diag_i[k]++; G_diag_j[offset] = i; } } G_diag_i[0] = 0; for (i = 1; i <= nrows_G; i++) G_diag_i[i] = G_diag_i[i-1] + counts[i-1]; free(counts); } /* form G transpose in special form (2 nodes per edge max) */ GT_diag_mat = (HYPRE_Int *) malloc(2 * ncols_G * sizeof(HYPRE_Int)); for (i = 0; i < 2 * ncols_G; i++) GT_diag_mat[i] = -1; 
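   /* record the (at most two) nodes incident to each edge; -1 marks an unused slot */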
for (i = 0; i < nrows_G; i++) { for (j = G_diag_i[i]; j < G_diag_i[i+1]; j++) { edge = G_diag_j[j]; if (GT_diag_mat[edge*2] == -1) GT_diag_mat[edge*2] = i; else GT_diag_mat[edge*2+1] = i; } } /* BFS on the local matrix graph to find tree */ nodes_marked = (HYPRE_Int *) malloc(nrows_G * sizeof(HYPRE_Int)); edges_marked = (HYPRE_Int *) malloc(ncols_G * sizeof(HYPRE_Int)); for (i = 0; i < nrows_G; i++) nodes_marked[i] = 0; for (i = 0; i < ncols_G; i++) edges_marked[i] = 0; queue = (HYPRE_Int *) malloc(nrows_G * sizeof(HYPRE_Int)); queue_head = 0; queue_tail = 1; queue[0] = 0; nodes_marked[0] = 1; while ((queue_tail-queue_head) > 0) { node = queue[queue_tail-1]; queue_tail--; for (i = G_diag_i[node]; i < G_diag_i[node+1]; i++) { edge = G_diag_j[i]; if (edges_marked[edge] == 0) { if (GT_diag_mat[2*edge+1] != -1) { node2 = GT_diag_mat[2*edge]; if (node2 == node) node2 = GT_diag_mat[2*edge+1]; if (nodes_marked[node2] == 0) { nodes_marked[node2] = 1; edges_marked[edge] = 1; queue[queue_tail] = node2; queue_tail++; } } } } } free(nodes_marked); free(queue); free(GT_diag_mat); /* fetch the communication information from */ comm = hypre_ParCSRMatrixComm(G_csr); hypre_MPI_Comm_rank(comm, &mypid); hypre_MPI_Comm_size(comm, &nprocs); comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr); if (nprocs == 1 && comm_pkg == NULL) { hypre_MatvecCommPkgCreate((hypre_ParCSRMatrix *) G_csr); comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr); } /* construct processor graph based on node-edge connection */ /* (local edges connected to neighbor processor nodes) */ n_children = 0; nrecvs = nsends = 0; if (nprocs > 1) { nsends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); nrecvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); proc_array = NULL; if ((nsends+nrecvs) > 0) { n_proc_array = 0; proc_array = (HYPRE_Int *) malloc((nsends+nrecvs) * sizeof(HYPRE_Int)); for (i = 0; i < nsends; i++) proc_array[i] = send_procs[i]; for (i = 0; i < nrecvs; i++) proc_array[nsends+i] = recv_procs[i]; hypre_qsort0(proc_array, 0, nsends+nrecvs-1); n_proc_array = 1; for (i = 1; i < nrecvs+nsends; i++) if (proc_array[i] != proc_array[n_proc_array]) proc_array[n_proc_array++] = proc_array[i]; } pgraph_i = (HYPRE_Int *) malloc((nprocs+1) * sizeof(HYPRE_Int)); recv_cnts = (HYPRE_Int *) malloc(nprocs * sizeof(HYPRE_Int)); hypre_MPI_Allgather(&n_proc_array, 1, HYPRE_MPI_INT, recv_cnts, 1, HYPRE_MPI_INT, comm); pgraph_i[0] = 0; for (i = 1; i <= nprocs; i++) pgraph_i[i] = pgraph_i[i-1] + recv_cnts[i-1]; pgraph_j = (HYPRE_Int *) malloc(pgraph_i[nprocs] * sizeof(HYPRE_Int)); hypre_MPI_Allgatherv(proc_array, n_proc_array, HYPRE_MPI_INT, pgraph_j, recv_cnts, pgraph_i, HYPRE_MPI_INT, comm); free(recv_cnts); /* BFS on the processor graph to determine parent and children */ nodes_marked = (HYPRE_Int *) malloc(nprocs * sizeof(HYPRE_Int)); for (i = 0; i < nprocs; i++) nodes_marked[i] = -1; queue = (HYPRE_Int *) malloc(nprocs * sizeof(HYPRE_Int)); queue_head = 0; queue_tail = 1; node = 0; queue[0] = node; while ((queue_tail-queue_head) > 0) { proc = queue[queue_tail-1]; queue_tail--; for (i = pgraph_i[proc]; i < pgraph_i[proc+1]; i++) { proc2 = pgraph_j[i]; if (nodes_marked[proc2] < 0) { nodes_marked[proc2] = proc; queue[queue_tail] = proc2; queue_tail++; } } } parent = nodes_marked[mypid]; n_children = 0; for (i = 0; i < nprocs; i++) if (nodes_marked[i] == mypid) n_children++; if (n_children == 0) {n_children = 0; children = NULL;} else { children = (HYPRE_Int *) 
malloc(n_children * sizeof(HYPRE_Int)); n_children = 0; for (i = 0; i < nprocs; i++) if (nodes_marked[i] == mypid) children[n_children++] = i; } free(nodes_marked); free(queue); free(pgraph_i); free(pgraph_j); } /* first, connection with my parent : if the edge in my parent * * is incident to one of my nodes, then my parent will mark it */ found = 0; for (i = 0; i < nrecvs; i++) { proc = hypre_ParCSRCommPkgRecvProc(comm_pkg, i); if (proc == parent) { found = 1; break; } } /* but if all the edges connected to my parent are on my side, * * then I will just pick one of them as tree edge */ if (found == 0) { for (i = 0; i < nsends; i++) { proc = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (proc == parent) { k = hypre_ParCSRCommPkgSendMapStart(comm_pkg,i); edge = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,k); edges_marked[edge] = 1; break; } } } /* next, if my processor has an edge incident on one node in my * * child, put this edge on the tree. But if there is no such * * edge, then I will assume my child will pick up an edge */ for (j = 0; j < n_children; j++) { proc = children[j]; for (i = 0; i < nsends; i++) { proc2 = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (proc == proc2) { k = hypre_ParCSRCommPkgSendMapStart(comm_pkg,i); edge = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,k); edges_marked[edge] = 1; break; } } } if (n_children > 0) free(children); /* count the size of the tree */ tree_size = 0; for (i = 0; i < ncols_G; i++) if (edges_marked[i] == 1) tree_size++; t_indices = (HYPRE_Int *) malloc((tree_size+1) * sizeof(HYPRE_Int)); t_indices[0] = tree_size; tree_size = 1; for (i = 0; i < ncols_G; i++) if (edges_marked[i] == 1) t_indices[tree_size++] = i; (*indices) = t_indices; free(edges_marked); if (G_type != 0) { free(G_diag_i); free(G_diag_j); } } /* ----------------------------------------------------------------------------- * extract submatrices based on given indices * ----------------------------------------------------------------------------- */ void hypre_ParCSRMatrixExtractSubmatrices( hypre_ParCSRMatrix *A_csr, HYPRE_Int *indices2, hypre_ParCSRMatrix ***submatrices ) { HYPRE_Int nindices, *indices, nrows_A, *A_diag_i, *A_diag_j, mypid, nprocs; HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *itmp_array, *exp_indices; HYPRE_Int nnz11, nnz12, nnz21, nnz22, col, ncols_offd, nnz_offd, nnz_diag; HYPRE_Int global_nrows, global_ncols, *row_starts, *col_starts, nrows, nnz; HYPRE_Int *diag_i, *diag_j, row, *offd_i; HYPRE_Complex *A_diag_a, *diag_a; hypre_ParCSRMatrix *A11_csr, *A12_csr, *A21_csr, *A22_csr; hypre_CSRMatrix *A_diag, *diag, *offd; MPI_Comm comm; /* ----------------------------------------------------- * first make sure the incoming indices are in order * ----------------------------------------------------- */ nindices = indices2[0]; indices = &(indices2[1]); hypre_qsort0(indices, 0, nindices-1); /* ----------------------------------------------------- * fetch matrix information * ----------------------------------------------------- */ nrows_A = hypre_ParCSRMatrixGlobalNumRows(A_csr); A_diag = hypre_ParCSRMatrixDiag(A_csr); A_diag_i = hypre_CSRMatrixI(A_diag); A_diag_j = hypre_CSRMatrixJ(A_diag); A_diag_a = hypre_CSRMatrixData(A_diag); comm = hypre_ParCSRMatrixComm(A_csr); hypre_MPI_Comm_rank(comm, &mypid); hypre_MPI_Comm_size(comm, &nprocs); if (nprocs > 1) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractSubmatrices: cannot handle nprocs > 1 yet.\n"); exit(1); } /* ----------------------------------------------------- * compute new matrix dimensions * 
----------------------------------------------------- */ proc_offsets1 = (HYPRE_Int *) malloc((nprocs+1) * sizeof(HYPRE_Int)); proc_offsets2 = (HYPRE_Int *) malloc((nprocs+1) * sizeof(HYPRE_Int)); hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1, HYPRE_MPI_INT, comm); k = 0; for (i = 0; i < nprocs; i++) { j = proc_offsets1[i]; proc_offsets1[i] = k; k += j; } proc_offsets1[nprocs] = k; itmp_array = hypre_ParCSRMatrixRowStarts(A_csr); for (i = 0; i <= nprocs; i++) proc_offsets2[i] = itmp_array[i] - proc_offsets1[i]; /* ----------------------------------------------------- * assign id's to row and col for later processing * ----------------------------------------------------- */ exp_indices = (HYPRE_Int *) malloc(nrows_A * sizeof(HYPRE_Int)); for (i = 0; i < nrows_A; i++) exp_indices[i] = -1; for (i = 0; i < nindices; i++) { if (exp_indices[indices[i]] == -1) exp_indices[indices[i]] = i; else { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractSubmatrices: wrong index %d %d\n"); exit(1); } } k = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { exp_indices[i] = - k - 1; k++; } } /* ----------------------------------------------------- * compute number of nonzeros for each block * ----------------------------------------------------- */ nnz11 = nnz12 = nnz21 = nnz22 = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) nnz11++; else nnz12++; } } else { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) nnz21++; else nnz22++; } } } /* ----------------------------------------------------- * create A11 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz11; #ifdef HYPRE_NO_GLOBAL_PARTITION /* This case is not yet implemented! 
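    * (in this branch the global sizes stay 0 and the start arrays stay NULL)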
*/ global_nrows = 0; global_ncols = 0; row_starts = NULL; col_starts = NULL; #else global_nrows = proc_offsets1[nprocs]; global_ncols = proc_offsets1[nprocs]; row_starts = hypre_CTAlloc(HYPRE_Int, nprocs+1); col_starts = hypre_CTAlloc(HYPRE_Int, nprocs+1); for (i = 0; i <= nprocs; i++) { row_starts[i] = proc_offsets1[i]; col_starts[i] = proc_offsets1[i]; } #endif A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) { diag_j[nnz] = exp_indices[col]; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A11_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A11_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * create A12 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz12; global_nrows = proc_offsets1[nprocs]; global_ncols = proc_offsets2[nprocs]; row_starts = hypre_CTAlloc(HYPRE_Int, nprocs+1); col_starts = hypre_CTAlloc(HYPRE_Int, nprocs+1); for (i = 0; i <= nprocs; i++) { row_starts[i] = proc_offsets1[i]; col_starts[i] = proc_offsets2[i]; } A12_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] < 0) { diag_j[nnz] = - exp_indices[col] - 1; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } if (nnz > nnz_diag) hypre_error(HYPRE_ERROR_GENERIC); /*hypre_printf("WARNING WARNING WARNING\n");*/ diag = hypre_ParCSRMatrixDiag(A12_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A12_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * create A21 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz21; global_nrows = proc_offsets2[nprocs]; global_ncols = proc_offsets1[nprocs]; row_starts = hypre_CTAlloc(HYPRE_Int, nprocs+1); col_starts = hypre_CTAlloc(HYPRE_Int, nprocs+1); for (i = 0; i <= nprocs; i++) { row_starts[i] = proc_offsets2[i]; col_starts[i] = proc_offsets1[i]; } A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nrows_A - nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1); diag_j = 
hypre_CTAlloc(HYPRE_Int, nnz_diag); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) { diag_j[nnz] = exp_indices[col]; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A21_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A21_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * create A22 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz22; global_nrows = proc_offsets2[nprocs]; global_ncols = proc_offsets2[nprocs]; row_starts = hypre_CTAlloc(HYPRE_Int, nprocs+1); col_starts = hypre_CTAlloc(HYPRE_Int, nprocs+1); for (i = 0; i <= nprocs; i++) { row_starts[i] = proc_offsets2[i]; col_starts[i] = proc_offsets2[i]; } A22_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nrows_A - nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] < 0) { diag_j[nnz] = - exp_indices[col] - 1; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A22_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A22_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * hand the matrices back to the caller and clean up * ----------------------------------------------------- */ (*submatrices)[0] = A11_csr; (*submatrices)[1] = A12_csr; (*submatrices)[2] = A21_csr; (*submatrices)[3] = A22_csr; free(proc_offsets1); free(proc_offsets2); free(exp_indices); } /* ----------------------------------------------------------------------------- * extract submatrices of a rectangular matrix * ----------------------------------------------------------------------------- */ void hypre_ParCSRMatrixExtractRowSubmatrices( hypre_ParCSRMatrix *A_csr, HYPRE_Int *indices2, hypre_ParCSRMatrix ***submatrices ) { HYPRE_Int nindices, *indices, nrows_A, *A_diag_i, *A_diag_j, mypid, nprocs; HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *itmp_array, *exp_indices; HYPRE_Int nnz11, nnz21, col, ncols_offd, nnz_offd, nnz_diag; HYPRE_Int *A_offd_i, *A_offd_j; HYPRE_Int global_nrows, global_ncols, *row_starts, *col_starts, nrows, nnz; HYPRE_Int *diag_i, *diag_j, row, *offd_i, *offd_j, nnz11_offd, nnz21_offd; HYPRE_Complex *A_diag_a, *diag_a, *offd_a; hypre_ParCSRMatrix *A11_csr, *A21_csr; hypre_CSRMatrix *A_diag, *diag, *A_offd, *offd; MPI_Comm comm; /* ----------------------------------------------------- * first make sure the incoming indices are in order * 
----------------------------------------------------- */ nindices = indices2[0]; indices = &(indices2[1]); hypre_qsort0(indices, 0, nindices-1); /* ----------------------------------------------------- * fetch matrix information * ----------------------------------------------------- */ nrows_A = hypre_ParCSRMatrixGlobalNumRows(A_csr); A_diag = hypre_ParCSRMatrixDiag(A_csr); A_diag_i = hypre_CSRMatrixI(A_diag); A_diag_j = hypre_CSRMatrixJ(A_diag); A_diag_a = hypre_CSRMatrixData(A_diag); A_offd = hypre_ParCSRMatrixOffd(A_csr); A_offd_i = hypre_CSRMatrixI(A_offd); A_offd_j = hypre_CSRMatrixJ(A_offd); comm = hypre_ParCSRMatrixComm(A_csr); hypre_MPI_Comm_rank(comm, &mypid); hypre_MPI_Comm_size(comm, &nprocs); /* ----------------------------------------------------- * compute new matrix dimensions * ----------------------------------------------------- */ proc_offsets1 = (HYPRE_Int *) malloc((nprocs+1) * sizeof(HYPRE_Int)); proc_offsets2 = (HYPRE_Int *) malloc((nprocs+1) * sizeof(HYPRE_Int)); hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1, HYPRE_MPI_INT, comm); k = 0; for (i = 0; i < nprocs; i++) { j = proc_offsets1[i]; proc_offsets1[i] = k; k += j; } proc_offsets1[nprocs] = k; itmp_array = hypre_ParCSRMatrixRowStarts(A_csr); for (i = 0; i <= nprocs; i++) proc_offsets2[i] = itmp_array[i] - proc_offsets1[i]; /* ----------------------------------------------------- * assign id's to row and col for later processing * ----------------------------------------------------- */ exp_indices = (HYPRE_Int *) malloc(nrows_A * sizeof(HYPRE_Int)); for (i = 0; i < nrows_A; i++) exp_indices[i] = -1; for (i = 0; i < nindices; i++) { if (exp_indices[indices[i]] == -1) exp_indices[indices[i]] = i; else { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractRowSubmatrices: wrong index %d %d\n"); exit(1); } } k = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { exp_indices[i] = - k - 1; k++; } } /* ----------------------------------------------------- * compute number of nonzeros for each block * ----------------------------------------------------- */ nnz11 = nnz21 = nnz11_offd = nnz21_offd = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) nnz11++; } nnz11_offd += A_offd_i[i+1] - A_offd_i[i]; } else { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] < 0) nnz21++; } nnz21_offd += A_offd_i[i+1] - A_offd_i[i]; } } /* ----------------------------------------------------- * create A11 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A_csr)); nnz_diag = nnz11; nnz_offd = nnz11_offd; global_nrows = proc_offsets1[nprocs]; itmp_array = hypre_ParCSRMatrixColStarts(A_csr); global_ncols = itmp_array[nprocs]; row_starts = hypre_CTAlloc(HYPRE_Int, nprocs+1); col_starts = hypre_CTAlloc(HYPRE_Int, nprocs+1); for (i = 0; i <= nprocs; i++) { row_starts[i] = proc_offsets1[i]; col_starts[i] = itmp_array[i]; } A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if 
(exp_indices[col] >= 0) { diag_j[nnz] = exp_indices[col]; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A11_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1); offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd); offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd); nnz = 0; row = 0; offd_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { offd_j[nnz] = A_offd_j[j]; offd_a[nnz++] = A_diag_a[j]; } row++; offd_i[row] = nnz; } } offd = hypre_ParCSRMatrixOffd(A11_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = offd_j; hypre_CSRMatrixData(offd) = offd_a; /* ----------------------------------------------------- * create A21 matrix * ----------------------------------------------------- */ ncols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A_csr)); nnz_offd = nnz21_offd; nnz_diag = nnz21; global_nrows = proc_offsets2[nprocs]; itmp_array = hypre_ParCSRMatrixColStarts(A_csr); global_ncols = itmp_array[nprocs]; row_starts = hypre_CTAlloc(HYPRE_Int, nprocs+1); col_starts = hypre_CTAlloc(HYPRE_Int, nprocs+1); for (i = 0; i <= nprocs; i++) { row_starts[i] = proc_offsets2[i]; col_starts[i] = itmp_array[i]; } A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nrows_A - nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { diag_j[nnz] = A_diag_j[j]; diag_a[nnz++] = A_diag_a[j]; } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A21_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1); offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd); offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd); nnz = 0; row = 0; offd_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { offd_j[nnz] = A_offd_j[j]; offd_a[nnz++] = A_diag_a[j]; } row++; offd_i[row] = nnz; } } offd = hypre_ParCSRMatrixOffd(A21_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = offd_j; hypre_CSRMatrixData(offd) = offd_a; /* ----------------------------------------------------- * hand the matrices back to the caller and clean up * ----------------------------------------------------- */ (*submatrices)[0] = A11_csr; (*submatrices)[1] = A21_csr; free(proc_offsets1); free(proc_offsets2); free(exp_indices); } /* ----------------------------------------------------------------------------- * return the sum of all local elements of the matrix * ----------------------------------------------------------------------------- */ HYPRE_Complex hypre_ParCSRMatrixLocalSumElts( hypre_ParCSRMatrix * A ) { hypre_CSRMatrix * A_diag = hypre_ParCSRMatrixDiag( A ); hypre_CSRMatrix * A_offd = hypre_ParCSRMatrixOffd( A ); return hypre_CSRMatrixSumElts(A_diag) + hypre_CSRMatrixSumElts(A_offd); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatAminvDB * computes C = (A - inv(D)B) where D is a diagonal matrix * Note: Data structure of A is expected to be a subset of data structure of B! 
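 * In entrywise terms, with d[i] holding the diagonal entry D_ii, the result is
 *    C(i,j) = A(i,j) - (1/d[i]) * B(i,j)
 * i.e. each row i of B is scaled by -1/d[i] and combined with the entries of A
 * on B's sparsity pattern (C is created as a structural clone of B).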
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixAminvDB( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B, HYPRE_Complex *d, hypre_ParCSRMatrix **C_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(B); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); hypre_ParCSRMatrix *C = NULL; HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); hypre_ParCSRCommPkg *comm_pkg_B = hypre_ParCSRMatrixCommPkg(B); hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); HYPRE_Int num_sends_B, num_recvs_B; HYPRE_Int i, j, cnt; HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(B_diag); HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag); HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag); HYPRE_Complex *B_diag_data = hypre_CSRMatrixData(B_diag); HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd); HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd); HYPRE_Complex *B_offd_data = hypre_CSRMatrixData(B_offd); HYPRE_Int *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); hypre_CSRMatrix *C_diag = NULL; hypre_CSRMatrix *C_offd = NULL; HYPRE_Int *C_diag_i = NULL; HYPRE_Int *C_diag_j = NULL; HYPRE_Complex *C_diag_data = NULL; HYPRE_Int *C_offd_i = NULL; HYPRE_Int *C_offd_j = NULL; HYPRE_Complex *C_offd_data = NULL; HYPRE_Int num_procs, my_id; HYPRE_Int *recv_procs_B; HYPRE_Int *send_procs_B; HYPRE_Int *recv_vec_starts_B; HYPRE_Int *send_map_starts_B; HYPRE_Int *send_map_elmts_B; hypre_ParCSRCommPkg *comm_pkg_C; HYPRE_Int *recv_procs_C; HYPRE_Int *send_procs_C; HYPRE_Int *recv_vec_starts_C; HYPRE_Int *send_map_starts_C; HYPRE_Int *send_map_elmts_C; HYPRE_Int *map_to_B; /*HYPRE_Int *C_diag_array; HYPRE_Int *C_offd_array;*/ HYPRE_Complex *D_tmp; HYPRE_Int size, rest, num_threads, ii; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); /*C_diag_array = hypre_CTAlloc(HYPRE_Int, num_threads); C_offd_array = hypre_CTAlloc(HYPRE_Int, num_threads);*/ /*--------------------------------------------------------------------- * If there exists no CommPkg for B, a CommPkg is generated *--------------------------------------------------------------------*/ if (!comm_pkg_B) { hypre_MatvecCommPkgCreate(B); comm_pkg_B = hypre_ParCSRMatrixCommPkg(B); } C = hypre_ParCSRMatrixCompleteClone(B); /*hypre_ParCSRMatrixInitialize(C);*/ C_diag = hypre_ParCSRMatrixDiag(C); C_diag_i = hypre_CSRMatrixI(C_diag); C_diag_j = hypre_CSRMatrixJ(C_diag); C_diag_data = hypre_CSRMatrixData(C_diag); C_offd = hypre_ParCSRMatrixOffd(C); C_offd_i = hypre_CSRMatrixI(C_offd); C_offd_j = hypre_CSRMatrixJ(C_offd); C_offd_data = hypre_CSRMatrixData(C_offd); size = num_rows/num_threads; rest = num_rows - size*num_threads; D_tmp = hypre_CTAlloc(HYPRE_Complex, num_rows); if (num_cols_offd_A) { map_to_B = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A); cnt = 0; for (i=0; i < num_cols_offd_A; i++) { while (col_map_offd_B[cnt] < col_map_offd_A[i]) { cnt++; } map_to_B[i] = cnt; cnt++; } } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(ii, i, j) #endif for 
(ii=0; ii < num_threads; ii++) { HYPRE_Int *A_marker = NULL; HYPRE_Int ns, ne, A_col, num_cols, nmax; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } nmax = hypre_max(num_rows, num_cols_offd_B); A_marker = hypre_CTAlloc(HYPRE_Int, nmax); for (i=0; i < num_rows; i++) A_marker[i] = -1; for (i=ns; i < ne; i++) D_tmp[i] = 1.0/d[i]; num_cols = C_diag_i[ns]; for (i=ns; i < ne; i++) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { A_col = A_diag_j[j]; if (A_marker[A_col] < C_diag_i[i]) { A_marker[A_col] = num_cols; C_diag_j[num_cols] = A_col; C_diag_data[num_cols] = A_diag_data[j]; num_cols++; } else { C_diag_data[A_marker[A_col]] += A_diag_data[j]; } } for (j = B_diag_i[i]; j < B_diag_i[i+1]; j++) { A_col = B_diag_j[j]; if (A_marker[A_col] < C_diag_i[i]) { A_marker[A_col] = num_cols; C_diag_j[num_cols] = A_col; C_diag_data[num_cols] = -D_tmp[i]*B_diag_data[j]; num_cols++; } else { C_diag_data[A_marker[A_col]] -= D_tmp[i]*B_diag_data[j]; } } } for (i=0; i < num_cols_offd_B; i++) A_marker[i] = -1; num_cols = C_offd_i[ns]; for (i=ns; i < ne; i++) { for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { A_col = map_to_B[A_offd_j[j]]; if (A_marker[A_col] < B_offd_i[i]) { A_marker[A_col] = num_cols; C_offd_j[num_cols] = A_col; C_offd_data[num_cols] = A_offd_data[j]; num_cols++; } else { C_offd_data[A_marker[A_col]] += A_offd_data[j]; } } for (j = B_offd_i[i]; j < B_offd_i[i+1]; j++) { A_col = B_offd_j[j]; if (A_marker[A_col] < B_offd_i[i]) { A_marker[A_col] = num_cols; C_offd_j[num_cols] = A_col; C_offd_data[num_cols] = -D_tmp[i]*B_offd_data[j]; num_cols++; } else { C_offd_data[A_marker[A_col]] -= D_tmp[i]*B_offd_data[j]; } } } hypre_TFree(A_marker); } /* end parallel region */ /*for (i=0; i < num_cols_offd_B; i++) col_map_offd_C[i] = col_map_offd_B[i]; */ num_sends_B = hypre_ParCSRCommPkgNumSends(comm_pkg_B); num_recvs_B = hypre_ParCSRCommPkgNumRecvs(comm_pkg_B); recv_procs_B = hypre_ParCSRCommPkgRecvProcs(comm_pkg_B); recv_vec_starts_B = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_B); send_procs_B = hypre_ParCSRCommPkgSendProcs(comm_pkg_B); send_map_starts_B = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_B); send_map_elmts_B = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_B); recv_procs_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B); recv_vec_starts_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B+1); send_procs_C = hypre_CTAlloc(HYPRE_Int, num_sends_B); send_map_starts_C = hypre_CTAlloc(HYPRE_Int, num_sends_B+1); send_map_elmts_C = hypre_CTAlloc(HYPRE_Int, send_map_starts_B[num_sends_B]); for (i=0; i < num_recvs_B; i++) recv_procs_C[i] = recv_procs_B[i]; for (i=0; i < num_recvs_B+1; i++) recv_vec_starts_C[i] = recv_vec_starts_B[i]; for (i=0; i < num_sends_B; i++) send_procs_C[i] = send_procs_B[i]; for (i=0; i < num_sends_B+1; i++) send_map_starts_C[i] = send_map_starts_B[i]; for (i=0; i < send_map_starts_B[num_sends_B]; i++) send_map_elmts_C[i] = send_map_elmts_B[i]; comm_pkg_C = hypre_CTAlloc(hypre_ParCSRCommPkg,1); hypre_ParCSRCommPkgComm(comm_pkg_C) = comm; hypre_ParCSRCommPkgNumRecvs(comm_pkg_C) = num_recvs_B; hypre_ParCSRCommPkgRecvProcs(comm_pkg_C) = recv_procs_C; hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_C) = recv_vec_starts_C; hypre_ParCSRCommPkgNumSends(comm_pkg_C) = num_sends_B; hypre_ParCSRCommPkgSendProcs(comm_pkg_C) = send_procs_C; hypre_ParCSRCommPkgSendMapStarts(comm_pkg_C) = send_map_starts_C; hypre_ParCSRCommPkgSendMapElmts(comm_pkg_C) = send_map_elmts_C; hypre_ParCSRMatrixCommPkg(C) = comm_pkg_C; hypre_TFree(D_tmp); if (num_cols_offd_A) 
hypre_TFree(map_to_B); *C_ptr = C; return (hypre_error_flag); } /*-------------------------------------------------------------------------- * hypre_ParTMatmul : multiplies two ParCSRMatrices transpose(A) and B and returns * the product in ParCSRMatrix C * Note that C does not own the partitionings since its row_starts * is owned by A and col_starts by B. *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix *hypre_ParTMatmul( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg_A = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *AT_diag = NULL; hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); hypre_CSRMatrix *AT_offd = NULL; HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag); hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_Int *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); HYPRE_Int first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B); HYPRE_Int *col_starts_A = hypre_ParCSRMatrixColStarts(A); HYPRE_Int *col_starts_B = hypre_ParCSRMatrixColStarts(B); HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag); HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); hypre_ParCSRMatrix *C; HYPRE_Int *col_map_offd_C = NULL; HYPRE_Int *map_B_to_C; hypre_CSRMatrix *C_diag = NULL; hypre_CSRMatrix *C_tmp_diag = NULL; HYPRE_Complex *C_diag_data = NULL; HYPRE_Int *C_diag_i = NULL; HYPRE_Int *C_diag_j = NULL; HYPRE_Int first_col_diag_C; HYPRE_Int last_col_diag_C; hypre_CSRMatrix *C_offd = NULL; hypre_CSRMatrix *C_tmp_offd = NULL; hypre_CSRMatrix *C_int = NULL; hypre_CSRMatrix *C_ext = NULL; HYPRE_Int *C_ext_i; HYPRE_Int *C_ext_j; HYPRE_Complex *C_ext_data; HYPRE_Int *C_ext_diag_i; HYPRE_Int *C_ext_diag_j; HYPRE_Complex *C_ext_diag_data; HYPRE_Int *C_ext_offd_i; HYPRE_Int *C_ext_offd_j; HYPRE_Complex *C_ext_offd_data; HYPRE_Int C_ext_size = 0; HYPRE_Int C_ext_diag_size = 0; HYPRE_Int C_ext_offd_size = 0; HYPRE_Int *C_tmp_diag_i; HYPRE_Int *C_tmp_diag_j; HYPRE_Complex *C_tmp_diag_data; HYPRE_Int *C_tmp_offd_i; HYPRE_Int *C_tmp_offd_j; HYPRE_Complex *C_tmp_offd_data; HYPRE_Complex *C_offd_data=NULL; HYPRE_Int *C_offd_i=NULL; HYPRE_Int *C_offd_j=NULL; HYPRE_Int *temp; HYPRE_Int *send_map_starts_A; HYPRE_Int *send_map_elmts_A; HYPRE_Int num_sends_A; HYPRE_Int num_cols_offd_C = 0; HYPRE_Int *P_marker; HYPRE_Int i, j; HYPRE_Int i1, j_indx; HYPRE_Int n_rows_A, n_cols_A; HYPRE_Int n_rows_B, n_cols_B; /*HYPRE_Int allsquare = 0;*/ HYPRE_Int cnt, cnt_offd, cnt_diag; HYPRE_Int value; HYPRE_Int num_procs, my_id; HYPRE_Int max_num_threads; HYPRE_Int *C_diag_array = NULL; HYPRE_Int *C_offd_array = NULL; HYPRE_Int first_row_index, first_col_diag; HYPRE_Int local_num_rows, local_num_cols; n_rows_A = hypre_ParCSRMatrixGlobalNumRows(A); n_cols_A = hypre_ParCSRMatrixGlobalNumCols(A); n_rows_B = hypre_ParCSRMatrixGlobalNumRows(B); n_cols_B = hypre_ParCSRMatrixGlobalNumCols(B); hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm, &my_id); max_num_threads = hypre_NumThreads(); if (n_rows_A != n_rows_B || num_rows_diag_A != num_rows_diag_B) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! 
Incompatible matrix dimensions!\n"); return NULL; } /*if (num_cols_diag_A == num_cols_diag_B) allsquare = 1;*/ hypre_CSRMatrixTranspose(A_diag, &AT_diag, 1); hypre_CSRMatrixTranspose(A_offd, &AT_offd, 1); C_tmp_diag = hypre_CSRMatrixMultiply(AT_diag, B_diag); C_ext_size = 0; if (num_procs > 1) { hypre_CSRMatrix *C_int_diag; hypre_CSRMatrix *C_int_offd; C_tmp_offd = hypre_CSRMatrixMultiply(AT_diag, B_offd); C_int_diag = hypre_CSRMatrixMultiply(AT_offd, B_diag); C_int_offd = hypre_CSRMatrixMultiply(AT_offd, B_offd); hypre_ParCSRMatrixDiag(B) = C_int_diag; hypre_ParCSRMatrixOffd(B) = C_int_offd; C_int = hypre_MergeDiagAndOffd(B); hypre_ParCSRMatrixDiag(B) = B_diag; hypre_ParCSRMatrixOffd(B) = B_offd; C_ext = hypre_ExchangeRAPData(C_int, comm_pkg_A); C_ext_i = hypre_CSRMatrixI(C_ext); C_ext_j = hypre_CSRMatrixJ(C_ext); C_ext_data = hypre_CSRMatrixData(C_ext); C_ext_size = C_ext_i[hypre_CSRMatrixNumRows(C_ext)]; hypre_CSRMatrixDestroy(C_int); hypre_CSRMatrixDestroy(C_int_diag); hypre_CSRMatrixDestroy(C_int_offd); } else { C_tmp_offd = hypre_CSRMatrixCreate(num_cols_diag_A, 0, 0); hypre_CSRMatrixInitialize(C_tmp_offd); } hypre_CSRMatrixDestroy(AT_diag); hypre_CSRMatrixDestroy(AT_offd); /*----------------------------------------------------------------------- * Add contents of C_ext to C_tmp_diag and C_tmp_offd * to obtain C_diag and C_offd *-----------------------------------------------------------------------*/ /* check for new nonzero columns in C_offd generated through C_ext */ first_col_diag_C = first_col_diag_B; last_col_diag_C = first_col_diag_B + num_cols_diag_B - 1; C_tmp_diag_i = hypre_CSRMatrixI(C_tmp_diag); if (C_ext_size || num_cols_offd_B) { HYPRE_Int C_ext_num_rows; num_sends_A = hypre_ParCSRCommPkgNumSends(comm_pkg_A); send_map_starts_A = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A); send_map_elmts_A = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_A); C_ext_num_rows = send_map_starts_A[num_sends_A]; C_ext_diag_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows+1); C_ext_offd_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows+1); temp = hypre_CTAlloc(HYPRE_Int, C_ext_size+num_cols_offd_B); C_ext_diag_size = 0; C_ext_offd_size = 0; for (i=0; i < C_ext_num_rows; i++) { for (j=C_ext_i[i]; j < C_ext_i[i+1]; j++) if (C_ext_j[j] < first_col_diag_C || C_ext_j[j] > last_col_diag_C) temp[C_ext_offd_size++] = C_ext_j[j]; else C_ext_diag_size++; C_ext_diag_i[i+1] = C_ext_diag_size; C_ext_offd_i[i+1] = C_ext_offd_size; } cnt = C_ext_offd_size; for (i=0; i < num_cols_offd_B; i++) temp[cnt++] = col_map_offd_B[i]; if (cnt) { hypre_qsort0(temp,0,cnt-1); value = temp[0]; num_cols_offd_C = 1; for (i=1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_C++] = value; } } } if (num_cols_offd_C) col_map_offd_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C); for (i=0; i < num_cols_offd_C; i++) col_map_offd_C[i] = temp[i]; hypre_TFree(temp); if (C_ext_diag_size) { C_ext_diag_j = hypre_CTAlloc(HYPRE_Int, C_ext_diag_size); C_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, C_ext_diag_size); } if (C_ext_offd_size) { C_ext_offd_j = hypre_CTAlloc(HYPRE_Int, C_ext_offd_size); C_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, C_ext_offd_size); } C_tmp_diag_j = hypre_CSRMatrixJ(C_tmp_diag); C_tmp_diag_data = hypre_CSRMatrixData(C_tmp_diag); C_tmp_offd_i = hypre_CSRMatrixI(C_tmp_offd); C_tmp_offd_j = hypre_CSRMatrixJ(C_tmp_offd); C_tmp_offd_data = hypre_CSRMatrixData(C_tmp_offd); cnt_offd = 0; cnt_diag = 0; for (i=0; i < C_ext_num_rows; i++) { for (j=C_ext_i[i]; j < C_ext_i[i+1]; j++) if (C_ext_j[j] < 
first_col_diag_C || C_ext_j[j] > last_col_diag_C) { C_ext_offd_j[cnt_offd] = hypre_BinarySearch(col_map_offd_C, C_ext_j[j], num_cols_offd_C); C_ext_offd_data[cnt_offd++] = C_ext_data[j]; } else { C_ext_diag_j[cnt_diag] = C_ext_j[j] - first_col_diag_C; C_ext_diag_data[cnt_diag++] = C_ext_data[j]; } } } if (C_ext) { hypre_CSRMatrixDestroy(C_ext); C_ext = NULL; } if (num_cols_offd_B) { map_B_to_C = hypre_CTAlloc(HYPRE_Int,num_cols_offd_B); cnt = 0; for (i=0; i < num_cols_offd_C; i++) if (col_map_offd_C[i] == col_map_offd_B[cnt]) { map_B_to_C[cnt++] = i; if (cnt == num_cols_offd_B) break; } for (i=0; i < hypre_CSRMatrixI(C_tmp_offd)[hypre_CSRMatrixNumRows(C_tmp_offd)]; i++) { j_indx = C_tmp_offd_j[i]; C_tmp_offd_j[i] = map_B_to_C[j_indx]; } } /*----------------------------------------------------------------------- * Need to compute C_diag = C_tmp_diag + C_ext_diag * and C_offd = C_tmp_offd + C_ext_offd !!!! * First generate structure *-----------------------------------------------------------------------*/ if (C_ext_size || num_cols_offd_B) { C_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A+1); C_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A+1); C_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads); C_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads); #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int *B_marker = NULL; HYPRE_Int *B_marker_offd = NULL; HYPRE_Int ik, jk, j1, j2, jcol; HYPRE_Int ns, ne, ii, nnz_d, nnz_o; HYPRE_Int rest, size; HYPRE_Int num_threads = hypre_NumActiveThreads(); size = num_cols_diag_A/num_threads; rest = num_cols_diag_A - size*num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B); B_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C); for (ik = 0; ik < num_cols_diag_B; ik++) B_marker[ik] = -1; for (ik = 0; ik < num_cols_offd_C; ik++) B_marker_offd[ik] = -1; nnz_d = 0; nnz_o = 0; for (ik = ns; ik < ne; ik++) { for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik+1]; jk++) { jcol = C_tmp_diag_j[jk]; B_marker[jcol] = ik; nnz_d++; } for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik+1]; jk++) { jcol = C_tmp_offd_j[jk]; B_marker_offd[jcol] = ik; nnz_o++; } for (jk = 0; jk < num_sends_A; jk++) for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk+1]; j1++) if (send_map_elmts_A[j1] == ik) { for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1+1]; j2++) { jcol = C_ext_diag_j[j2]; if (B_marker[jcol] < ik) { B_marker[jcol] = ik; nnz_d++; } } for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1+1]; j2++) { jcol = C_ext_offd_j[j2]; if (B_marker_offd[jcol] < ik) { B_marker_offd[jcol] = ik; nnz_o++; } } break; } C_diag_array[ii] = nnz_d; C_offd_array[ii] = nnz_o; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii == 0) { nnz_d = 0; nnz_o = 0; for (ik = 0; ik < num_threads-1; ik++) { C_diag_array[ik+1] += C_diag_array[ik]; C_offd_array[ik+1] += C_offd_array[ik]; } nnz_d = C_diag_array[num_threads-1]; nnz_o = C_offd_array[num_threads-1]; C_diag_i[num_cols_diag_A] = nnz_d; C_offd_i[num_cols_diag_A] = nnz_o; C_diag = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_diag_A, nnz_d); C_offd = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_offd_C, nnz_o); hypre_CSRMatrixI(C_diag) = C_diag_i; hypre_CSRMatrixInitialize(C_diag); C_diag_j = hypre_CSRMatrixJ(C_diag); C_diag_data = hypre_CSRMatrixData(C_diag); hypre_CSRMatrixI(C_offd) = C_offd_i; hypre_CSRMatrixInitialize(C_offd); C_offd_j = 
hypre_CSRMatrixJ(C_offd);
            C_offd_data = hypre_CSRMatrixData(C_offd);
         }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
         /*-----------------------------------------------------------------------
          * Need to compute C_diag = C_tmp_diag + C_ext_diag
          * and C_offd = C_tmp_offd + C_ext_offd   !!!!
          * Now fill in values
          *-----------------------------------------------------------------------*/
         for (ik = 0; ik < num_cols_diag_B; ik++)
            B_marker[ik] = -1;
         for (ik = 0; ik < num_cols_offd_C; ik++)
            B_marker_offd[ik] = -1;
         /*-----------------------------------------------------------------------
          * Populate matrices
          *-----------------------------------------------------------------------*/
         nnz_d = 0;
         nnz_o = 0;
         if (ii)
         {
            nnz_d = C_diag_array[ii-1];
            nnz_o = C_offd_array[ii-1];
         }
         for (ik = ns; ik < ne; ik++)
         {
            C_diag_i[ik] = nnz_d;
            C_offd_i[ik] = nnz_o;
            for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik+1]; jk++)
            {
               jcol = C_tmp_diag_j[jk];
               C_diag_j[nnz_d] = jcol;
               C_diag_data[nnz_d] = C_tmp_diag_data[jk];
               B_marker[jcol] = nnz_d;
               nnz_d++;
            }
            for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik+1]; jk++)
            {
               jcol = C_tmp_offd_j[jk];
               C_offd_j[nnz_o] = jcol;
               C_offd_data[nnz_o] = C_tmp_offd_data[jk];
               B_marker_offd[jcol] = nnz_o;
               nnz_o++;
            }
            for (jk = 0; jk < num_sends_A; jk++)
               for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk+1]; j1++)
                  if (send_map_elmts_A[j1] == ik)
                  {
                     for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1+1]; j2++)
                     {
                        jcol = C_ext_diag_j[j2];
                        if (B_marker[jcol] < C_diag_i[ik])
                        {
                           C_diag_j[nnz_d] = jcol;
                           C_diag_data[nnz_d] = C_ext_diag_data[j2];
                           B_marker[jcol] = nnz_d;
                           nnz_d++;
                        }
                        else
                           C_diag_data[B_marker[jcol]] += C_ext_diag_data[j2];
                     }
                     for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1+1]; j2++)
                     {
                        jcol = C_ext_offd_j[j2];
                        if (B_marker_offd[jcol] < C_offd_i[ik])
                        {
                           C_offd_j[nnz_o] = jcol;
                           C_offd_data[nnz_o] = C_ext_offd_data[j2];
                           B_marker_offd[jcol] = nnz_o;
                           nnz_o++;
                        }
                        else
                           C_offd_data[B_marker_offd[jcol]] += C_ext_offd_data[j2];
                     }
                     break;
                  }
         }
         hypre_TFree(B_marker);
         hypre_TFree(B_marker_offd);
      } /*end parallel region */
      hypre_TFree(C_diag_array);
      hypre_TFree(C_offd_array);
   }

   /*C = hypre_ParCSRMatrixCreate(comm, n_cols_A, n_cols_B, col_starts_A,
        col_starts_B, num_cols_offd_C, nnz_diag, nnz_offd);
   hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(C));
   hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(C)); */

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* row_starts[0] is start of local rows.
row_starts[1] is start of next processor's rows */
   first_row_index = col_starts_A[0];
   local_num_rows = col_starts_A[1] - first_row_index;
   first_col_diag = col_starts_B[0];
   local_num_cols = col_starts_B[1] - first_col_diag;
#else
   first_row_index = col_starts_A[my_id];
   local_num_rows = col_starts_A[my_id+1] - first_row_index;
   first_col_diag = col_starts_B[my_id];
   local_num_cols = col_starts_B[my_id+1] - first_col_diag;
#endif

   C = hypre_CTAlloc(hypre_ParCSRMatrix, 1);
   hypre_ParCSRMatrixComm(C) = comm;
   hypre_ParCSRMatrixGlobalNumRows(C) = n_cols_A;
   hypre_ParCSRMatrixGlobalNumCols(C) = n_cols_B;
   hypre_ParCSRMatrixFirstRowIndex(C) = first_row_index;
   hypre_ParCSRMatrixFirstColDiag(C) = first_col_diag;
   hypre_ParCSRMatrixLastRowIndex(C) = first_row_index + local_num_rows - 1;
   hypre_ParCSRMatrixLastColDiag(C) = first_col_diag + local_num_cols - 1;
   hypre_ParCSRMatrixColMapOffd(C) = NULL;
   hypre_ParCSRMatrixAssumedPartition(C) = NULL;
   hypre_ParCSRMatrixRowStarts(C) = col_starts_A;
   hypre_ParCSRMatrixColStarts(C) = col_starts_B;
   hypre_ParCSRMatrixCommPkg(C) = NULL;
   hypre_ParCSRMatrixCommPkgT(C) = NULL;

   /* set defaults */
   hypre_ParCSRMatrixOwnsData(C) = 1;
   hypre_ParCSRMatrixRowindices(C) = NULL;
   hypre_ParCSRMatrixRowvalues(C) = NULL;
   hypre_ParCSRMatrixGetrowactive(C) = 0;

   /* Note that C does not own the partitionings */
   hypre_ParCSRMatrixSetRowStartsOwner(C, 0);
   hypre_ParCSRMatrixSetColStartsOwner(C, 0);

   if (C_diag)
      hypre_ParCSRMatrixDiag(C) = C_diag;
   else
      hypre_ParCSRMatrixDiag(C) = C_tmp_diag;
   if (C_offd)
      hypre_ParCSRMatrixOffd(C) = C_offd;
   else
      hypre_ParCSRMatrixOffd(C) = C_tmp_offd;

   if (num_cols_offd_C)
   {
      HYPRE_Int jj_count_offd, nnz_offd;
      HYPRE_Int *new_col_map_offd_C = NULL;

      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C);
      for (i = 0; i < num_cols_offd_C; i++)
         P_marker[i] = -1;

      jj_count_offd = 0;
      nnz_offd = C_offd_i[num_cols_diag_A];
      for (i = 0; i < nnz_offd; i++)
      {
         i1 = C_offd_j[i];
         if (P_marker[i1])
         {
            P_marker[i1] = 0;
            jj_count_offd++;
         }
      }

      if (jj_count_offd < num_cols_offd_C)
      {
         new_col_map_offd_C = hypre_CTAlloc(HYPRE_Int, jj_count_offd);
         jj_count_offd = 0;
         for (i = 0; i < num_cols_offd_C; i++)
            if (!P_marker[i])
            {
               P_marker[i] = jj_count_offd;
               new_col_map_offd_C[jj_count_offd++] = col_map_offd_C[i];
            }

         for (i = 0; i < nnz_offd; i++)
         {
            i1 = C_offd_j[i];
            C_offd_j[i] = P_marker[i1];
         }

         num_cols_offd_C = jj_count_offd;
         hypre_TFree(col_map_offd_C);
         col_map_offd_C = new_col_map_offd_C;
         hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(C)) = num_cols_offd_C;
      }
      hypre_TFree(P_marker);
   }
   hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C;

   /*-----------------------------------------------------------------------
    * Free various arrays
    *-----------------------------------------------------------------------*/
   if (C_ext_size || num_cols_offd_B)
   {
      hypre_TFree(C_ext_diag_i);
      hypre_TFree(C_ext_offd_i);
   }
   if (C_ext_diag_size)
   {
      hypre_TFree(C_ext_diag_j);
      hypre_TFree(C_ext_diag_data);
   }
   if (C_ext_offd_size)
   {
      hypre_TFree(C_ext_offd_j);
      hypre_TFree(C_ext_offd_data);
   }
   if (num_cols_offd_B)
      hypre_TFree(map_B_to_C);
   if (C_diag)
      hypre_CSRMatrixDestroy(C_tmp_diag);
   if (C_offd)
      hypre_CSRMatrixDestroy(C_tmp_offd);

   return C;
}
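/* Illustrative note (not part of the original file): the routine ending above
   returns C = A^T * B for two hypre_ParCSRMatrix operands with compatible
   partitionings.  A minimal usage sketch, assuming the enclosing function is
   hypre_ParTMatmul() as in hypre's par_csr_matop.c; the name and the setup of
   A and B are assumptions, since the function header is not visible here:

      hypre_ParCSRMatrix *C = hypre_ParTMatmul(A, B);
      ...
      hypre_ParCSRMatrixDestroy(C);

   Note that C aliases A's and B's col_starts arrays as its row/column
   partitionings and, per hypre_ParCSRMatrixSetRowStartsOwner(C, 0) above,
   does not free them when destroyed. */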
pt.c
/* Handle parameterized types (templates) for GNU -*- C++ -*-. Copyright (C) 1992-2020 Free Software Foundation, Inc. Written by Ken Raeburn (raeburn@cygnus.com) while at Watchmaker Computing. Rewritten by Jason Merrill (jason@cygnus.com). This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ /* Known bugs or deficiencies include: all methods must be provided in header files; can't use a source file that contains only the method templates and "just win". */ #include "config.h" #include "system.h" #include "coretypes.h" #include "cp-tree.h" #include "timevar.h" #include "stringpool.h" #include "varasm.h" #include "attribs.h" #include "stor-layout.h" #include "intl.h" #include "c-family/c-objc.h" #include "cp-objcp-common.h" #include "toplev.h" #include "tree-iterator.h" #include "type-utils.h" #include "gimplify.h" #include "gcc-rich-location.h" #include "selftest.h" #include "target.h" /* The type of functions taking a tree, and some additional data, and returning an int. */ typedef int (*tree_fn_t) (tree, void*); /* The PENDING_TEMPLATES is a TREE_LIST of templates whose instantiations have been deferred, either because their definitions were not yet available, or because we were putting off doing the work. */ struct GTY ((chain_next ("%h.next"))) pending_template { struct pending_template *next; struct tinst_level *tinst; }; static GTY(()) struct pending_template *pending_templates; static GTY(()) struct pending_template *last_pending_template; int processing_template_parmlist; static int template_header_count; static GTY(()) tree saved_trees; static vec<int> inline_parm_levels; static GTY(()) struct tinst_level *current_tinst_level; static GTY(()) vec<tree, va_gc> *saved_access_scope; /* Live only within one (recursive) call to tsubst_expr. We use this to pass the statement expression node from the STMT_EXPR to the EXPR_STMT that is its result. */ static tree cur_stmt_expr; // -------------------------------------------------------------------------- // // Local Specialization Stack // // Implementation of the RAII helper for creating new local // specializations. local_specialization_stack::local_specialization_stack (lss_policy policy) : saved (local_specializations) { if (policy == lss_nop) ; else if (policy == lss_blank || !saved) local_specializations = new hash_map<tree, tree>; else local_specializations = new hash_map<tree, tree>(*saved); } local_specialization_stack::~local_specialization_stack () { if (local_specializations != saved) { delete local_specializations; local_specializations = saved; } } /* True if we've recursed into fn_type_unification too many times. 
*/ static bool excessive_deduction_depth; struct GTY((for_user)) spec_entry { tree tmpl; tree args; tree spec; }; struct spec_hasher : ggc_ptr_hash<spec_entry> { static hashval_t hash (spec_entry *); static bool equal (spec_entry *, spec_entry *); }; static GTY (()) hash_table<spec_hasher> *decl_specializations; static GTY (()) hash_table<spec_hasher> *type_specializations; /* Contains canonical template parameter types. The vector is indexed by the TEMPLATE_TYPE_IDX of the template parameter. Each element is a TREE_LIST, whose TREE_VALUEs contain the canonical template parameters of various types and levels. */ static GTY(()) vec<tree, va_gc> *canonical_template_parms; #define UNIFY_ALLOW_NONE 0 #define UNIFY_ALLOW_MORE_CV_QUAL 1 #define UNIFY_ALLOW_LESS_CV_QUAL 2 #define UNIFY_ALLOW_DERIVED 4 #define UNIFY_ALLOW_INTEGER 8 #define UNIFY_ALLOW_OUTER_LEVEL 16 #define UNIFY_ALLOW_OUTER_MORE_CV_QUAL 32 #define UNIFY_ALLOW_OUTER_LESS_CV_QUAL 64 enum template_base_result { tbr_incomplete_type, tbr_ambiguous_baseclass, tbr_success }; static bool resolve_overloaded_unification (tree, tree, tree, tree, unification_kind_t, int, bool); static int try_one_overload (tree, tree, tree, tree, tree, unification_kind_t, int, bool, bool); static int unify (tree, tree, tree, tree, int, bool); static void add_pending_template (tree); static tree reopen_tinst_level (struct tinst_level *); static tree tsubst_initializer_list (tree, tree); static tree get_partial_spec_bindings (tree, tree, tree); static tree coerce_template_parms (tree, tree, tree, tsubst_flags_t, bool, bool); static tree coerce_innermost_template_parms (tree, tree, tree, tsubst_flags_t, bool, bool); static void tsubst_enum (tree, tree, tree); static tree add_to_template_args (tree, tree); static bool check_instantiated_args (tree, tree, tsubst_flags_t); static int check_non_deducible_conversion (tree, tree, int, int, struct conversion **, bool); static int maybe_adjust_types_for_deduction (unification_kind_t, tree*, tree*, tree); static int type_unification_real (tree, tree, tree, const tree *, unsigned int, int, unification_kind_t, vec<deferred_access_check, va_gc> **, bool); static void note_template_header (int); static tree convert_nontype_argument_function (tree, tree, tsubst_flags_t); static tree convert_nontype_argument (tree, tree, tsubst_flags_t); static tree convert_template_argument (tree, tree, tree, tsubst_flags_t, int, tree); static tree for_each_template_parm (tree, tree_fn_t, void*, hash_set<tree> *, bool, tree_fn_t = NULL); static tree expand_template_argument_pack (tree); static tree build_template_parm_index (int, int, int, tree, tree); static bool inline_needs_template_parms (tree, bool); static void push_inline_template_parms_recursive (tree, int); static tree reduce_template_parm_level (tree, tree, int, tree, tsubst_flags_t); static int mark_template_parm (tree, void *); static int template_parm_this_level_p (tree, void *); static tree tsubst_friend_function (tree, tree); static tree tsubst_friend_class (tree, tree); static int can_complete_type_without_circularity (tree); static tree get_bindings (tree, tree, tree, bool); static int template_decl_level (tree); static int check_cv_quals_for_unify (int, tree, tree); static int unify_pack_expansion (tree, tree, tree, tree, unification_kind_t, bool, bool); static tree copy_template_args (tree); static tree tsubst_template_parms (tree, tree, tsubst_flags_t); tree most_specialized_partial_spec (tree, tsubst_flags_t); static tree tsubst_aggr_type (tree, tree, tsubst_flags_t, 
tree, int); static tree tsubst_arg_types (tree, tree, tree, tsubst_flags_t, tree); static tree tsubst_function_type (tree, tree, tsubst_flags_t, tree); static bool check_specialization_scope (void); static tree process_partial_specialization (tree); static void set_current_access_from_decl (tree); static enum template_base_result get_template_base (tree, tree, tree, tree, bool , tree *); static tree try_class_unification (tree, tree, tree, tree, bool); static bool class_nttp_const_wrapper_p (tree t); static int coerce_template_template_parms (tree, tree, tsubst_flags_t, tree, tree); static bool template_template_parm_bindings_ok_p (tree, tree); static void tsubst_default_arguments (tree, tsubst_flags_t); static tree for_each_template_parm_r (tree *, int *, void *); static tree copy_default_args_to_explicit_spec_1 (tree, tree); static void copy_default_args_to_explicit_spec (tree); static bool invalid_nontype_parm_type_p (tree, tsubst_flags_t); static bool dependent_template_arg_p (tree); static bool any_template_arguments_need_structural_equality_p (tree); static bool dependent_type_p_r (tree); static tree tsubst_copy (tree, tree, tsubst_flags_t, tree); static tree tsubst_decl (tree, tree, tsubst_flags_t); static void perform_typedefs_access_check (tree tmpl, tree targs); static void append_type_to_template_for_access_check_1 (tree, tree, tree, location_t); static tree listify (tree); static tree listify_autos (tree, tree); static tree tsubst_template_parm (tree, tree, tsubst_flags_t); static tree instantiate_alias_template (tree, tree, tsubst_flags_t); static bool complex_alias_template_p (const_tree tmpl); static tree get_underlying_template (tree); static tree tsubst_attributes (tree, tree, tsubst_flags_t, tree); static tree canonicalize_expr_argument (tree, tsubst_flags_t); static tree make_argument_pack (tree); static void register_parameter_specializations (tree, tree); static tree enclosing_instantiation_of (tree tctx); /* Make the current scope suitable for access checking when we are processing T. T can be FUNCTION_DECL for instantiated function template, VAR_DECL for static member variable, or TYPE_DECL for alias template (needed by instantiate_decl). */ void push_access_scope (tree t) { gcc_assert (VAR_OR_FUNCTION_DECL_P (t) || TREE_CODE (t) == TYPE_DECL); if (DECL_FRIEND_CONTEXT (t)) push_nested_class (DECL_FRIEND_CONTEXT (t)); else if (DECL_CLASS_SCOPE_P (t)) push_nested_class (DECL_CONTEXT (t)); else push_to_top_level (); if (TREE_CODE (t) == FUNCTION_DECL) { vec_safe_push (saved_access_scope, current_function_decl); current_function_decl = t; } } /* Restore the scope set up by push_access_scope. T is the node we are processing. */ void pop_access_scope (tree t) { if (TREE_CODE (t) == FUNCTION_DECL) current_function_decl = saved_access_scope->pop(); if (DECL_FRIEND_CONTEXT (t) || DECL_CLASS_SCOPE_P (t)) pop_nested_class (); else pop_from_top_level (); } /* Do any processing required when DECL (a member template declaration) is finished. Returns the TEMPLATE_DECL corresponding to DECL, unless it is a specialization, in which case the DECL itself is returned. 
*/ tree finish_member_template_decl (tree decl) { if (decl == error_mark_node) return error_mark_node; gcc_assert (DECL_P (decl)); if (TREE_CODE (decl) == TYPE_DECL) { tree type; type = TREE_TYPE (decl); if (type == error_mark_node) return error_mark_node; if (MAYBE_CLASS_TYPE_P (type) && CLASSTYPE_TEMPLATE_INFO (type) && !CLASSTYPE_TEMPLATE_SPECIALIZATION (type)) { tree tmpl = CLASSTYPE_TI_TEMPLATE (type); check_member_template (tmpl); return tmpl; } return NULL_TREE; } else if (TREE_CODE (decl) == FIELD_DECL) error_at (DECL_SOURCE_LOCATION (decl), "data member %qD cannot be a member template", decl); else if (DECL_TEMPLATE_INFO (decl)) { if (!DECL_TEMPLATE_SPECIALIZATION (decl)) { check_member_template (DECL_TI_TEMPLATE (decl)); return DECL_TI_TEMPLATE (decl); } else return decl; } else error_at (DECL_SOURCE_LOCATION (decl), "invalid member template declaration %qD", decl); return error_mark_node; } /* Create a template info node. */ tree build_template_info (tree template_decl, tree template_args) { tree result = make_node (TEMPLATE_INFO); TI_TEMPLATE (result) = template_decl; TI_ARGS (result) = template_args; return result; } /* Return the template info node corresponding to T, whatever T is. */ tree get_template_info (const_tree t) { tree tinfo = NULL_TREE; if (!t || t == error_mark_node) return NULL; if (TREE_CODE (t) == NAMESPACE_DECL || TREE_CODE (t) == PARM_DECL) return NULL; if (DECL_P (t) && DECL_LANG_SPECIFIC (t)) tinfo = DECL_TEMPLATE_INFO (t); if (!tinfo && DECL_IMPLICIT_TYPEDEF_P (t)) t = TREE_TYPE (t); if (OVERLOAD_TYPE_P (t)) tinfo = TYPE_TEMPLATE_INFO (t); else if (TREE_CODE (t) == BOUND_TEMPLATE_TEMPLATE_PARM) tinfo = TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO (t); return tinfo; } /* Returns the template nesting level of the indicated class TYPE. For example, in: template <class T> struct A { template <class U> struct B {}; }; A<T>::B<U> has depth two, while A<T> has depth one. Both A<T>::B<int> and A<int>::B<U> have depth one, if they are instantiations, not specializations. This function is guaranteed to return 0 if passed NULL_TREE so that, for example, `template_class_depth (current_class_type)' is always safe. */ int template_class_depth (tree type) { int depth; for (depth = 0; type && TREE_CODE (type) != NAMESPACE_DECL; ) { tree tinfo = get_template_info (type); if (tinfo && PRIMARY_TEMPLATE_P (TI_TEMPLATE (tinfo)) && uses_template_parms (INNERMOST_TEMPLATE_ARGS (TI_ARGS (tinfo)))) ++depth; if (DECL_P (type)) { if (tree fctx = DECL_FRIEND_CONTEXT (type)) type = fctx; else type = CP_DECL_CONTEXT (type); } else if (LAMBDA_TYPE_P (type) && LAMBDA_TYPE_EXTRA_SCOPE (type)) type = LAMBDA_TYPE_EXTRA_SCOPE (type); else type = CP_TYPE_CONTEXT (type); } return depth; } /* Return TRUE if NODE instantiates a template that has arguments of its own, be it directly a primary template or indirectly through a partial specializations. */ static bool instantiates_primary_template_p (tree node) { tree tinfo = get_template_info (node); if (!tinfo) return false; tree tmpl = TI_TEMPLATE (tinfo); if (PRIMARY_TEMPLATE_P (tmpl)) return true; if (!DECL_TEMPLATE_SPECIALIZATION (tmpl)) return false; /* So now we know we have a specialization, but it could be a full or a partial specialization. To tell which, compare the depth of its template arguments with those of its context. 
*/ tree ctxt = DECL_CONTEXT (tmpl); tree ctinfo = get_template_info (ctxt); if (!ctinfo) return true; return (TMPL_ARGS_DEPTH (TI_ARGS (tinfo)) > TMPL_ARGS_DEPTH (TI_ARGS (ctinfo))); } /* Subroutine of maybe_begin_member_template_processing. Returns true if processing DECL needs us to push template parms. */ static bool inline_needs_template_parms (tree decl, bool nsdmi) { if (!decl || (!nsdmi && ! DECL_TEMPLATE_INFO (decl))) return false; return (TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (most_general_template (decl))) > (processing_template_decl + DECL_TEMPLATE_SPECIALIZATION (decl))); } /* Subroutine of maybe_begin_member_template_processing. Push the template parms in PARMS, starting from LEVELS steps into the chain, and ending at the beginning, since template parms are listed innermost first. */ static void push_inline_template_parms_recursive (tree parmlist, int levels) { tree parms = TREE_VALUE (parmlist); int i; if (levels > 1) push_inline_template_parms_recursive (TREE_CHAIN (parmlist), levels - 1); ++processing_template_decl; current_template_parms = tree_cons (size_int (processing_template_decl), parms, current_template_parms); TEMPLATE_PARMS_FOR_INLINE (current_template_parms) = 1; begin_scope (TREE_VEC_LENGTH (parms) ? sk_template_parms : sk_template_spec, NULL); for (i = 0; i < TREE_VEC_LENGTH (parms); ++i) { tree parm = TREE_VALUE (TREE_VEC_ELT (parms, i)); if (error_operand_p (parm)) continue; gcc_assert (DECL_P (parm)); switch (TREE_CODE (parm)) { case TYPE_DECL: case TEMPLATE_DECL: pushdecl (parm); break; case PARM_DECL: /* Push the CONST_DECL. */ pushdecl (TEMPLATE_PARM_DECL (DECL_INITIAL (parm))); break; default: gcc_unreachable (); } } } /* Restore the template parameter context for a member template, a friend template defined in a class definition, or a non-template member of template class. */ void maybe_begin_member_template_processing (tree decl) { tree parms; int levels = 0; bool nsdmi = TREE_CODE (decl) == FIELD_DECL; if (nsdmi) { tree ctx = DECL_CONTEXT (decl); decl = (CLASSTYPE_TEMPLATE_INFO (ctx) /* Disregard full specializations (c++/60999). */ && uses_template_parms (ctx) ? CLASSTYPE_TI_TEMPLATE (ctx) : NULL_TREE); } if (inline_needs_template_parms (decl, nsdmi)) { parms = DECL_TEMPLATE_PARMS (most_general_template (decl)); levels = TMPL_PARMS_DEPTH (parms) - processing_template_decl; if (DECL_TEMPLATE_SPECIALIZATION (decl)) { --levels; parms = TREE_CHAIN (parms); } push_inline_template_parms_recursive (parms, levels); } /* Remember how many levels of template parameters we pushed so that we can pop them later. */ inline_parm_levels.safe_push (levels); } /* Undo the effects of maybe_begin_member_template_processing. */ void maybe_end_member_template_processing (void) { int i; int last; if (inline_parm_levels.length () == 0) return; last = inline_parm_levels.pop (); for (i = 0; i < last; ++i) { --processing_template_decl; current_template_parms = TREE_CHAIN (current_template_parms); poplevel (0, 0, 0); } } /* Return a new template argument vector which contains all of ARGS, but has as its innermost set of arguments the EXTRA_ARGS. 
*/ static tree add_to_template_args (tree args, tree extra_args) { tree new_args; int extra_depth; int i; int j; if (args == NULL_TREE || extra_args == error_mark_node) return extra_args; extra_depth = TMPL_ARGS_DEPTH (extra_args); new_args = make_tree_vec (TMPL_ARGS_DEPTH (args) + extra_depth); for (i = 1; i <= TMPL_ARGS_DEPTH (args); ++i) SET_TMPL_ARGS_LEVEL (new_args, i, TMPL_ARGS_LEVEL (args, i)); for (j = 1; j <= extra_depth; ++j, ++i) SET_TMPL_ARGS_LEVEL (new_args, i, TMPL_ARGS_LEVEL (extra_args, j)); return new_args; } /* Like add_to_template_args, but only the outermost ARGS are added to the EXTRA_ARGS. In particular, all but TMPL_ARGS_DEPTH (EXTRA_ARGS) levels are added. This function is used to combine the template arguments from a partial instantiation with the template arguments used to attain the full instantiation from the partial instantiation. */ tree add_outermost_template_args (tree args, tree extra_args) { tree new_args; /* If there are more levels of EXTRA_ARGS than there are ARGS, something very fishy is going on. */ gcc_assert (TMPL_ARGS_DEPTH (args) >= TMPL_ARGS_DEPTH (extra_args)); /* If *all* the new arguments will be the EXTRA_ARGS, just return them. */ if (TMPL_ARGS_DEPTH (args) == TMPL_ARGS_DEPTH (extra_args)) return extra_args; /* For the moment, we make ARGS look like it contains fewer levels. */ TREE_VEC_LENGTH (args) -= TMPL_ARGS_DEPTH (extra_args); new_args = add_to_template_args (args, extra_args); /* Now, we restore ARGS to its full dimensions. */ TREE_VEC_LENGTH (args) += TMPL_ARGS_DEPTH (extra_args); return new_args; } /* Return the N levels of innermost template arguments from the ARGS. */ tree get_innermost_template_args (tree args, int n) { tree new_args; int extra_levels; int i; gcc_assert (n >= 0); /* If N is 1, just return the innermost set of template arguments. */ if (n == 1) return TMPL_ARGS_LEVEL (args, TMPL_ARGS_DEPTH (args)); /* If we're not removing anything, just return the arguments we were given. */ extra_levels = TMPL_ARGS_DEPTH (args) - n; gcc_assert (extra_levels >= 0); if (extra_levels == 0) return args; /* Make a new set of arguments, not containing the outer arguments. */ new_args = make_tree_vec (n); for (i = 1; i <= n; ++i) SET_TMPL_ARGS_LEVEL (new_args, i, TMPL_ARGS_LEVEL (args, i + extra_levels)); return new_args; } /* The inverse of get_innermost_template_args: Return all but the innermost EXTRA_LEVELS levels of template arguments from the ARGS. */ static tree strip_innermost_template_args (tree args, int extra_levels) { tree new_args; int n = TMPL_ARGS_DEPTH (args) - extra_levels; int i; gcc_assert (n >= 0); /* If N is 1, just return the outermost set of template arguments. */ if (n == 1) return TMPL_ARGS_LEVEL (args, 1); /* If we're not removing anything, just return the arguments we were given. */ gcc_assert (extra_levels >= 0); if (extra_levels == 0) return args; /* Make a new set of arguments, not containing the inner arguments. */ new_args = make_tree_vec (n); for (i = 1; i <= n; ++i) SET_TMPL_ARGS_LEVEL (new_args, i, TMPL_ARGS_LEVEL (args, i)); return new_args; } /* We've got a template header coming up; push to a new level for storing the parms. */ void begin_template_parm_list (void) { /* We use a non-tag-transparent scope here, which causes pushtag to put tags in this scope, rather than in the enclosing class or namespace scope. This is the right thing, since we want TEMPLATE_DECLS, and not TYPE_DECLS for template classes. 
For a global template class, push_template_decl handles putting the TEMPLATE_DECL into top-level scope. For a nested template class, e.g.: template <class T> struct S1 { template <class T> struct S2 {}; }; pushtag contains special code to insert the TEMPLATE_DECL for S2 at the right scope. */ begin_scope (sk_template_parms, NULL); ++processing_template_decl; ++processing_template_parmlist; note_template_header (0); /* Add a dummy parameter level while we process the parameter list. */ current_template_parms = tree_cons (size_int (processing_template_decl), make_tree_vec (0), current_template_parms); } /* This routine is called when a specialization is declared. If it is invalid to declare a specialization here, an error is reported and false is returned, otherwise this routine will return true. */ static bool check_specialization_scope (void) { tree scope = current_scope (); /* [temp.expl.spec] An explicit specialization shall be declared in the namespace of which the template is a member, or, for member templates, in the namespace of which the enclosing class or enclosing class template is a member. An explicit specialization of a member function, member class or static data member of a class template shall be declared in the namespace of which the class template is a member. */ if (scope && TREE_CODE (scope) != NAMESPACE_DECL) { error ("explicit specialization in non-namespace scope %qD", scope); return false; } /* [temp.expl.spec] In an explicit specialization declaration for a member of a class template or a member template that appears in namespace scope, the member template and some of its enclosing class templates may remain unspecialized, except that the declaration shall not explicitly specialize a class member template if its enclosing class templates are not explicitly specialized as well. */ if (current_template_parms) { error ("enclosing class templates are not explicitly specialized"); return false; } return true; } /* We've just seen template <>. */ bool begin_specialization (void) { begin_scope (sk_template_spec, NULL); note_template_header (1); return check_specialization_scope (); } /* Called at then end of processing a declaration preceded by template<>. */ void end_specialization (void) { finish_scope (); reset_specialization (); } /* Any template <>'s that we have seen thus far are not referring to a function specialization. */ void reset_specialization (void) { processing_specialization = 0; template_header_count = 0; } /* We've just seen a template header. If SPECIALIZATION is nonzero, it was of the form template <>. */ static void note_template_header (int specialization) { processing_specialization = specialization; template_header_count++; } /* We're beginning an explicit instantiation. */ void begin_explicit_instantiation (void) { gcc_assert (!processing_explicit_instantiation); processing_explicit_instantiation = true; } void end_explicit_instantiation (void) { gcc_assert (processing_explicit_instantiation); processing_explicit_instantiation = false; } /* An explicit specialization or partial specialization of TMPL is being declared. Check that the namespace in which the specialization is occurring is permissible. Returns false iff it is invalid to specialize TMPL in the current namespace. */ static bool check_specialization_namespace (tree tmpl) { tree tpl_ns = decl_namespace_context (tmpl); /* [tmpl.expl.spec] An explicit specialization shall be declared in a namespace enclosing the specialized template. 
An explicit specialization whose declarator-id is not qualified shall be declared in the nearest enclosing namespace of the template, or, if the namespace is inline (7.3.1), any namespace from its enclosing namespace set. */ if (current_scope() != DECL_CONTEXT (tmpl) && !at_namespace_scope_p ()) { error ("specialization of %qD must appear at namespace scope", tmpl); return false; } if (is_nested_namespace (current_namespace, tpl_ns, cxx_dialect < cxx11)) /* Same or enclosing namespace. */ return true; else { auto_diagnostic_group d; if (permerror (input_location, "specialization of %qD in different namespace", tmpl)) inform (DECL_SOURCE_LOCATION (tmpl), " from definition of %q#D", tmpl); return false; } } /* SPEC is an explicit instantiation. Check that it is valid to perform this explicit instantiation in the current namespace. */ static void check_explicit_instantiation_namespace (tree spec) { tree ns; /* DR 275: An explicit instantiation shall appear in an enclosing namespace of its template. */ ns = decl_namespace_context (spec); if (!is_nested_namespace (current_namespace, ns)) permerror (input_location, "explicit instantiation of %qD in namespace %qD " "(which does not enclose namespace %qD)", spec, current_namespace, ns); } /* Returns the type of a template specialization only if that specialization needs to be defined. Otherwise (e.g., if the type has already been defined), the function returns NULL_TREE. */ static tree maybe_new_partial_specialization (tree type) { /* An implicit instantiation of an incomplete type implies the definition of a new class template. template<typename T> struct S; template<typename T> struct S<T*>; Here, S<T*> is an implicit instantiation of S whose type is incomplete. */ if (CLASSTYPE_IMPLICIT_INSTANTIATION (type) && !COMPLETE_TYPE_P (type)) return type; /* It can also be the case that TYPE is a completed specialization. Continuing the previous example, suppose we also declare: template<typename T> requires Integral<T> struct S<T*>; Here, S<T*> refers to the specialization S<T*> defined above. However, we need to differentiate definitions because we intend to define a new partial specialization. In this case, we rely on the fact that the constraints are different for this declaration than that above. Note that we also get here for injected class names and late-parsed template definitions. We must ensure that we do not create new type declarations for those cases. */ if (flag_concepts && CLASSTYPE_TEMPLATE_SPECIALIZATION (type)) { tree tmpl = CLASSTYPE_TI_TEMPLATE (type); tree args = CLASSTYPE_TI_ARGS (type); /* If there are no template parameters, this cannot be a new partial template specialization? */ if (!current_template_parms) return NULL_TREE; /* The injected-class-name is not a new partial specialization. */ if (DECL_SELF_REFERENCE_P (TYPE_NAME (type))) return NULL_TREE; /* If the constraints are not the same as those of the primary then, we can probably create a new specialization. */ tree type_constr = current_template_constraints (); if (type == TREE_TYPE (tmpl)) { tree main_constr = get_constraints (tmpl); if (equivalent_constraints (type_constr, main_constr)) return NULL_TREE; } /* Also, if there's a pre-existing specialization with matching constraints, then this also isn't new. 
*/ tree specs = DECL_TEMPLATE_SPECIALIZATIONS (tmpl); while (specs) { tree spec_tmpl = TREE_VALUE (specs); tree spec_args = TREE_PURPOSE (specs); tree spec_constr = get_constraints (spec_tmpl); if (comp_template_args (args, spec_args) && equivalent_constraints (type_constr, spec_constr)) return NULL_TREE; specs = TREE_CHAIN (specs); } /* Create a new type node (and corresponding type decl) for the newly declared specialization. */ tree t = make_class_type (TREE_CODE (type)); CLASSTYPE_DECLARED_CLASS (t) = CLASSTYPE_DECLARED_CLASS (type); SET_TYPE_TEMPLATE_INFO (t, build_template_info (tmpl, args)); /* We only need a separate type node for storing the definition of this partial specialization; uses of S<T*> are unconstrained, so all are equivalent. So keep TYPE_CANONICAL the same. */ TYPE_CANONICAL (t) = TYPE_CANONICAL (type); /* Build the corresponding type decl. */ tree d = create_implicit_typedef (DECL_NAME (tmpl), t); DECL_CONTEXT (d) = TYPE_CONTEXT (t); DECL_SOURCE_LOCATION (d) = input_location; TREE_PRIVATE (d) = (current_access_specifier == access_private_node); TREE_PROTECTED (d) = (current_access_specifier == access_protected_node); return t; } return NULL_TREE; } /* The TYPE is being declared. If it is a template type, that means it is a partial specialization. Do appropriate error-checking. */ tree maybe_process_partial_specialization (tree type) { tree context; if (type == error_mark_node) return error_mark_node; /* A lambda that appears in specialization context is not itself a specialization. */ if (CLASS_TYPE_P (type) && CLASSTYPE_LAMBDA_EXPR (type)) return type; if (TREE_CODE (type) == BOUND_TEMPLATE_TEMPLATE_PARM) { error ("name of class shadows template template parameter %qD", TYPE_NAME (type)); return error_mark_node; } context = TYPE_CONTEXT (type); if (TYPE_ALIAS_P (type)) { tree tinfo = TYPE_ALIAS_TEMPLATE_INFO (type); if (tinfo && DECL_ALIAS_TEMPLATE_P (TI_TEMPLATE (tinfo))) error ("specialization of alias template %qD", TI_TEMPLATE (tinfo)); else error ("explicit specialization of non-template %qT", type); return error_mark_node; } else if (CLASS_TYPE_P (type) && CLASSTYPE_USE_TEMPLATE (type)) { /* This is for ordinary explicit specialization and partial specialization of a template class such as: template <> class C<int>; or: template <class T> class C<T*>; Make sure that `C<int>' and `C<T*>' are implicit instantiations. */ if (tree t = maybe_new_partial_specialization (type)) { if (!check_specialization_namespace (CLASSTYPE_TI_TEMPLATE (t)) && !at_namespace_scope_p ()) return error_mark_node; SET_CLASSTYPE_TEMPLATE_SPECIALIZATION (t); DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (t)) = input_location; if (processing_template_decl) { tree decl = push_template_decl (TYPE_MAIN_DECL (t)); if (decl == error_mark_node) return error_mark_node; return TREE_TYPE (decl); } } else if (CLASSTYPE_TEMPLATE_INSTANTIATION (type)) error ("specialization of %qT after instantiation", type); else if (errorcount && !processing_specialization && CLASSTYPE_TEMPLATE_SPECIALIZATION (type) && !uses_template_parms (CLASSTYPE_TI_ARGS (type))) /* Trying to define a specialization either without a template<> header or in an inappropriate place. We've already given an error, so just bail now so we don't actually define the specialization. 
*/ return error_mark_node; } else if (CLASS_TYPE_P (type) && !CLASSTYPE_USE_TEMPLATE (type) && CLASSTYPE_TEMPLATE_INFO (type) && context && CLASS_TYPE_P (context) && CLASSTYPE_TEMPLATE_INFO (context)) { /* This is for an explicit specialization of member class template according to [temp.expl.spec/18]: template <> template <class U> class C<int>::D; The context `C<int>' must be an implicit instantiation. Otherwise this is just a member class template declared earlier like: template <> class C<int> { template <class U> class D; }; template <> template <class U> class C<int>::D; In the first case, `C<int>::D' is a specialization of `C<T>::D' while in the second case, `C<int>::D' is a primary template and `C<T>::D' may not exist. */ if (CLASSTYPE_IMPLICIT_INSTANTIATION (context) && !COMPLETE_TYPE_P (type)) { tree t; tree tmpl = CLASSTYPE_TI_TEMPLATE (type); if (current_namespace != decl_namespace_context (tmpl)) { if (permerror (input_location, "specialization of %qD in different namespace", type)) inform (DECL_SOURCE_LOCATION (tmpl), "from definition of %q#D", tmpl); } /* Check for invalid specialization after instantiation: template <> template <> class C<int>::D<int>; template <> template <class U> class C<int>::D; */ for (t = DECL_TEMPLATE_INSTANTIATIONS (tmpl); t; t = TREE_CHAIN (t)) { tree inst = TREE_VALUE (t); if (CLASSTYPE_TEMPLATE_SPECIALIZATION (inst) || !COMPLETE_OR_OPEN_TYPE_P (inst)) { /* We already have a full specialization of this partial instantiation, or a full specialization has been looked up but not instantiated. Reassign it to the new member specialization template. */ spec_entry elt; spec_entry *entry; elt.tmpl = most_general_template (tmpl); elt.args = CLASSTYPE_TI_ARGS (inst); elt.spec = inst; type_specializations->remove_elt (&elt); elt.tmpl = tmpl; CLASSTYPE_TI_ARGS (inst) = elt.args = INNERMOST_TEMPLATE_ARGS (elt.args); spec_entry **slot = type_specializations->find_slot (&elt, INSERT); entry = ggc_alloc<spec_entry> (); *entry = elt; *slot = entry; } else /* But if we've had an implicit instantiation, that's a problem ([temp.expl.spec]/6). */ error ("specialization %qT after instantiation %qT", type, inst); } /* Mark TYPE as a specialization. And as a result, we only have one level of template argument for the innermost class template. */ SET_CLASSTYPE_TEMPLATE_SPECIALIZATION (type); DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (type)) = input_location; CLASSTYPE_TI_ARGS (type) = INNERMOST_TEMPLATE_ARGS (CLASSTYPE_TI_ARGS (type)); } } else if (processing_specialization) { /* Someday C++0x may allow for enum template specialization. */ if (cxx_dialect > cxx98 && TREE_CODE (type) == ENUMERAL_TYPE && CLASS_TYPE_P (context) && CLASSTYPE_USE_TEMPLATE (context)) pedwarn (input_location, OPT_Wpedantic, "template specialization " "of %qD not allowed by ISO C++", type); else { error ("explicit specialization of non-template %qT", type); return error_mark_node; } } return type; } /* Returns nonzero if we can optimize the retrieval of specializations for TMPL, a TEMPLATE_DECL. In particular, for such a template, we do not use DECL_TEMPLATE_SPECIALIZATIONS at all. */ static inline bool optimize_specialization_lookup_p (tree tmpl) { return (DECL_FUNCTION_TEMPLATE_P (tmpl) && DECL_CLASS_SCOPE_P (tmpl) /* DECL_CLASS_SCOPE_P holds of T::f even if T is a template parameter. 
*/ && CLASS_TYPE_P (DECL_CONTEXT (tmpl)) /* The optimized lookup depends on the fact that the template arguments for the member function template apply purely to the containing class, which is not true if the containing class is an explicit or partial specialization. */ && !CLASSTYPE_TEMPLATE_SPECIALIZATION (DECL_CONTEXT (tmpl)) && !DECL_MEMBER_TEMPLATE_P (tmpl) && !DECL_CONV_FN_P (tmpl) /* It is possible to have a template that is not a member template and is not a member of a template class: template <typename T> struct S { friend A::f(); }; Here, the friend function is a template, but the context does not have template information. The optimized lookup relies on having ARGS be the template arguments for both the class and the function template. */ && !DECL_FRIEND_P (DECL_TEMPLATE_RESULT (tmpl))); } /* Make sure ARGS doesn't use any inappropriate typedefs; we should have gone through coerce_template_parms by now. */ static void verify_unstripped_args_1 (tree inner) { for (int i = 0; i < TREE_VEC_LENGTH (inner); ++i) { tree arg = TREE_VEC_ELT (inner, i); if (TREE_CODE (arg) == TEMPLATE_DECL) /* OK */; else if (TYPE_P (arg)) gcc_assert (strip_typedefs (arg, NULL) == arg); else if (ARGUMENT_PACK_P (arg)) verify_unstripped_args_1 (ARGUMENT_PACK_ARGS (arg)); else if (strip_typedefs (TREE_TYPE (arg), NULL) != TREE_TYPE (arg)) /* Allow typedefs on the type of a non-type argument, since a parameter can have them. */; else gcc_assert (strip_typedefs_expr (arg, NULL) == arg); } } static void verify_unstripped_args (tree args) { ++processing_template_decl; if (!any_dependent_template_arguments_p (args)) verify_unstripped_args_1 (INNERMOST_TEMPLATE_ARGS (args)); --processing_template_decl; } /* Retrieve the specialization (in the sense of [temp.spec] - a specialization is either an instantiation or an explicit specialization) of TMPL for the given template ARGS. If there is no such specialization, return NULL_TREE. The ARGS are a vector of arguments, or a vector of vectors of arguments, in the case of templates with more than one level of parameters. If TMPL is a type template and CLASS_SPECIALIZATIONS_P is true, then we search for a partial specialization matching ARGS. This parameter is ignored if TMPL is not a class template. We can also look up a FIELD_DECL, if it is a lambda capture pack; the result is a NONTYPE_ARGUMENT_PACK. */ static tree retrieve_specialization (tree tmpl, tree args, hashval_t hash) { if (tmpl == NULL_TREE) return NULL_TREE; if (args == error_mark_node) return NULL_TREE; gcc_assert (TREE_CODE (tmpl) == TEMPLATE_DECL || TREE_CODE (tmpl) == FIELD_DECL); /* There should be as many levels of arguments as there are levels of parameters. */ gcc_assert (TMPL_ARGS_DEPTH (args) == (TREE_CODE (tmpl) == TEMPLATE_DECL ? TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (tmpl)) : template_class_depth (DECL_CONTEXT (tmpl)))); if (flag_checking) verify_unstripped_args (args); /* Lambda functions in templates aren't instantiated normally, but through tsubst_lambda_expr. */ if (lambda_fn_in_template_p (tmpl)) return NULL_TREE; if (optimize_specialization_lookup_p (tmpl)) { /* The template arguments actually apply to the containing class. Find the class specialization with those arguments. */ tree class_template = CLASSTYPE_TI_TEMPLATE (DECL_CONTEXT (tmpl)); tree class_specialization = retrieve_specialization (class_template, args, 0); if (!class_specialization) return NULL_TREE; /* Find the instance of TMPL. 
*/ tree fns = get_class_binding (class_specialization, DECL_NAME (tmpl)); for (ovl_iterator iter (fns); iter; ++iter) { tree fn = *iter; if (tree ti = get_template_info (fn)) if (TI_TEMPLATE (ti) == tmpl /* using-declarations can bring in a different instantiation of tmpl as a member of a different instantiation of tmpl's class. We don't want those here. */ && DECL_CONTEXT (fn) == class_specialization) return fn; } return NULL_TREE; } else { spec_entry *found; spec_entry elt; hash_table<spec_hasher> *specializations; elt.tmpl = tmpl; elt.args = args; elt.spec = NULL_TREE; if (DECL_CLASS_TEMPLATE_P (tmpl)) specializations = type_specializations; else specializations = decl_specializations; if (hash == 0) hash = spec_hasher::hash (&elt); found = specializations->find_with_hash (&elt, hash); if (found) return found->spec; } return NULL_TREE; } /* Like retrieve_specialization, but for local declarations. */ tree retrieve_local_specialization (tree tmpl) { if (local_specializations == NULL) return NULL_TREE; tree *slot = local_specializations->get (tmpl); return slot ? *slot : NULL_TREE; } /* Returns nonzero iff DECL is a specialization of TMPL. */ int is_specialization_of (tree decl, tree tmpl) { tree t; if (TREE_CODE (decl) == FUNCTION_DECL) { for (t = decl; t != NULL_TREE; t = DECL_TEMPLATE_INFO (t) ? DECL_TI_TEMPLATE (t) : NULL_TREE) if (t == tmpl) return 1; } else { gcc_assert (TREE_CODE (decl) == TYPE_DECL); for (t = TREE_TYPE (decl); t != NULL_TREE; t = CLASSTYPE_USE_TEMPLATE (t) ? TREE_TYPE (CLASSTYPE_TI_TEMPLATE (t)) : NULL_TREE) if (same_type_ignoring_top_level_qualifiers_p (t, TREE_TYPE (tmpl))) return 1; } return 0; } /* Returns nonzero iff DECL is a specialization of friend declaration FRIEND_DECL according to [temp.friend]. */ bool is_specialization_of_friend (tree decl, tree friend_decl) { bool need_template = true; int template_depth; gcc_assert (TREE_CODE (decl) == FUNCTION_DECL || TREE_CODE (decl) == TYPE_DECL); /* For [temp.friend/6] when FRIEND_DECL is an ordinary member function of a template class, we want to check if DECL is a specialization if this. */ if (TREE_CODE (friend_decl) == FUNCTION_DECL && DECL_TEMPLATE_INFO (friend_decl) && !DECL_USE_TEMPLATE (friend_decl)) { /* We want a TEMPLATE_DECL for `is_specialization_of'. */ friend_decl = DECL_TI_TEMPLATE (friend_decl); need_template = false; } else if (TREE_CODE (friend_decl) == TEMPLATE_DECL && !PRIMARY_TEMPLATE_P (friend_decl)) need_template = false; /* There is nothing to do if this is not a template friend. */ if (TREE_CODE (friend_decl) != TEMPLATE_DECL) return false; if (is_specialization_of (decl, friend_decl)) return true; /* [temp.friend/6] A member of a class template may be declared to be a friend of a non-template class. In this case, the corresponding member of every specialization of the class template is a friend of the class granting friendship. For example, given a template friend declaration template <class T> friend void A<T>::f(); the member function below is considered a friend template <> struct A<int> { void f(); }; For this type of template friend, TEMPLATE_DEPTH below will be nonzero. To determine if DECL is a friend of FRIEND, we first check if the enclosing class is a specialization of another. */ template_depth = template_class_depth (CP_DECL_CONTEXT (friend_decl)); if (template_depth && DECL_CLASS_SCOPE_P (decl) && is_specialization_of (TYPE_NAME (DECL_CONTEXT (decl)), CLASSTYPE_TI_TEMPLATE (DECL_CONTEXT (friend_decl)))) { /* Next, we check the members themselves. 
In order to handle a few tricky cases, such as when FRIEND_DECL's are template <class T> friend void A<T>::g(T t); template <class T> template <T t> friend void A<T>::h(); and DECL's are void A<int>::g(int); template <int> void A<int>::h(); we need to figure out ARGS, the template arguments from the context of DECL. This is required for template substitution of `T' in the function parameter of `g' and template parameter of `h' in the above examples. Here ARGS corresponds to `int'. */ tree context = DECL_CONTEXT (decl); tree args = NULL_TREE; int current_depth = 0; while (current_depth < template_depth) { if (CLASSTYPE_TEMPLATE_INFO (context)) { if (current_depth == 0) args = TYPE_TI_ARGS (context); else args = add_to_template_args (TYPE_TI_ARGS (context), args); current_depth++; } context = TYPE_CONTEXT (context); } if (TREE_CODE (decl) == FUNCTION_DECL) { bool is_template; tree friend_type; tree decl_type; tree friend_args_type; tree decl_args_type; /* Make sure that both DECL and FRIEND_DECL are templates or non-templates. */ is_template = DECL_TEMPLATE_INFO (decl) && PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (decl)); if (need_template ^ is_template) return false; else if (is_template) { /* If both are templates, check template parameter list. */ tree friend_parms = tsubst_template_parms (DECL_TEMPLATE_PARMS (friend_decl), args, tf_none); if (!comp_template_parms (DECL_TEMPLATE_PARMS (DECL_TI_TEMPLATE (decl)), friend_parms)) return false; decl_type = TREE_TYPE (DECL_TI_TEMPLATE (decl)); } else decl_type = TREE_TYPE (decl); friend_type = tsubst_function_type (TREE_TYPE (friend_decl), args, tf_none, NULL_TREE); if (friend_type == error_mark_node) return false; /* Check if return types match. */ if (!same_type_p (TREE_TYPE (decl_type), TREE_TYPE (friend_type))) return false; /* Check if function parameter types match, ignoring the `this' parameter. */ friend_args_type = TYPE_ARG_TYPES (friend_type); decl_args_type = TYPE_ARG_TYPES (decl_type); if (DECL_NONSTATIC_MEMBER_FUNCTION_P (friend_decl)) friend_args_type = TREE_CHAIN (friend_args_type); if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl)) decl_args_type = TREE_CHAIN (decl_args_type); return compparms (decl_args_type, friend_args_type); } else { /* DECL is a TYPE_DECL */ bool is_template; tree decl_type = TREE_TYPE (decl); /* Make sure that both DECL and FRIEND_DECL are templates or non-templates. */ is_template = CLASSTYPE_TEMPLATE_INFO (decl_type) && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (decl_type)); if (need_template ^ is_template) return false; else if (is_template) { tree friend_parms; /* If both are templates, check the name of the two TEMPLATE_DECL's first because is_friend didn't. */ if (DECL_NAME (CLASSTYPE_TI_TEMPLATE (decl_type)) != DECL_NAME (friend_decl)) return false; /* Now check template parameter list. */ friend_parms = tsubst_template_parms (DECL_TEMPLATE_PARMS (friend_decl), args, tf_none); return comp_template_parms (DECL_TEMPLATE_PARMS (CLASSTYPE_TI_TEMPLATE (decl_type)), friend_parms); } else return (DECL_NAME (decl) == DECL_NAME (friend_decl)); } } return false; } /* Register the specialization SPEC as a specialization of TMPL with the indicated ARGS. IS_FRIEND indicates whether the specialization is actually just a friend declaration. ATTRLIST is the list of attributes that the specialization is declared with or NULL when it isn't. Returns SPEC, or an equivalent prior declaration, if available. 
We also store instantiations of field packs in the hash table, even though they are not themselves templates, to make lookup easier. */ static tree register_specialization (tree spec, tree tmpl, tree args, bool is_friend, hashval_t hash) { tree fn; spec_entry **slot = NULL; spec_entry elt; gcc_assert ((TREE_CODE (tmpl) == TEMPLATE_DECL && DECL_P (spec)) || (TREE_CODE (tmpl) == FIELD_DECL && TREE_CODE (spec) == NONTYPE_ARGUMENT_PACK)); if (TREE_CODE (spec) == FUNCTION_DECL && uses_template_parms (DECL_TI_ARGS (spec))) /* This is the FUNCTION_DECL for a partial instantiation. Don't register it; we want the corresponding TEMPLATE_DECL instead. We use `uses_template_parms (DECL_TI_ARGS (spec))' rather than the more obvious `uses_template_parms (spec)' to avoid problems with default function arguments. In particular, given something like this: template <class T> void f(T t1, T t = T()) the default argument expression is not substituted for in an instantiation unless and until it is actually needed. */ return spec; if (optimize_specialization_lookup_p (tmpl)) /* We don't put these specializations in the hash table, but we might want to give an error about a mismatch. */ fn = retrieve_specialization (tmpl, args, 0); else { elt.tmpl = tmpl; elt.args = args; elt.spec = spec; if (hash == 0) hash = spec_hasher::hash (&elt); slot = decl_specializations->find_slot_with_hash (&elt, hash, INSERT); if (*slot) fn = ((spec_entry *) *slot)->spec; else fn = NULL_TREE; } /* We can sometimes try to re-register a specialization that we've already got. In particular, regenerate_decl_from_template calls duplicate_decls which will update the specialization list. But, we'll still get called again here anyhow. It's more convenient to simply allow this than to try to prevent it. */ if (fn == spec) return spec; else if (fn && DECL_TEMPLATE_SPECIALIZATION (spec)) { if (DECL_TEMPLATE_INSTANTIATION (fn)) { if (DECL_ODR_USED (fn) || DECL_EXPLICIT_INSTANTIATION (fn)) { error ("specialization of %qD after instantiation", fn); return error_mark_node; } else { tree clone; /* This situation should occur only if the first specialization is an implicit instantiation, the second is an explicit specialization, and the implicit instantiation has not yet been used. That situation can occur if we have implicitly instantiated a member function and then specialized it later. We can also wind up here if a friend declaration that looked like an instantiation turns out to be a specialization: template <class T> void foo(T); class S { friend void foo<>(int) }; template <> void foo(int); We transform the existing DECL in place so that any pointers to it become pointers to the updated declaration. If there was a definition for the template, but not for the specialization, we want this to look as if there were no definition, and vice versa. */ DECL_INITIAL (fn) = NULL_TREE; duplicate_decls (spec, fn, is_friend); /* The call to duplicate_decls will have applied [temp.expl.spec]: An explicit specialization of a function template is inline only if it is explicitly declared to be, and independently of whether its function template is. to the primary function; now copy the inline bits to the various clones. 
*/ FOR_EACH_CLONE (clone, fn) { DECL_DECLARED_INLINE_P (clone) = DECL_DECLARED_INLINE_P (fn); DECL_SOURCE_LOCATION (clone) = DECL_SOURCE_LOCATION (fn); DECL_DELETED_FN (clone) = DECL_DELETED_FN (fn); } check_specialization_namespace (tmpl); return fn; } } else if (DECL_TEMPLATE_SPECIALIZATION (fn)) { tree dd = duplicate_decls (spec, fn, is_friend); if (dd == error_mark_node) /* We've already complained in duplicate_decls. */ return error_mark_node; if (dd == NULL_TREE && DECL_INITIAL (spec)) /* Dup decl failed, but this is a new definition. Set the line number so any errors match this new definition. */ DECL_SOURCE_LOCATION (fn) = DECL_SOURCE_LOCATION (spec); return fn; } } else if (fn) return duplicate_decls (spec, fn, is_friend); /* A specialization must be declared in the same namespace as the template it is specializing. */ if (DECL_P (spec) && DECL_TEMPLATE_SPECIALIZATION (spec) && !check_specialization_namespace (tmpl)) DECL_CONTEXT (spec) = DECL_CONTEXT (tmpl); if (slot != NULL /* !optimize_specialization_lookup_p (tmpl) */) { spec_entry *entry = ggc_alloc<spec_entry> (); gcc_assert (tmpl && args && spec); *entry = elt; *slot = entry; if ((TREE_CODE (spec) == FUNCTION_DECL && DECL_NAMESPACE_SCOPE_P (spec) && PRIMARY_TEMPLATE_P (tmpl) && DECL_SAVED_TREE (DECL_TEMPLATE_RESULT (tmpl)) == NULL_TREE) || variable_template_p (tmpl)) /* If TMPL is a forward declaration of a template function, keep a list of all specializations in case we need to reassign them to a friend template later in tsubst_friend_function. Also keep a list of all variable template instantiations so that process_partial_specialization can check whether a later partial specialization would have used it. */ DECL_TEMPLATE_INSTANTIATIONS (tmpl) = tree_cons (args, spec, DECL_TEMPLATE_INSTANTIATIONS (tmpl)); } return spec; } /* Returns true iff two spec_entry nodes are equivalent. */ int comparing_specializations; bool spec_hasher::equal (spec_entry *e1, spec_entry *e2) { int equal; ++comparing_specializations; equal = (e1->tmpl == e2->tmpl && comp_template_args (e1->args, e2->args)); if (equal && flag_concepts /* tmpl could be a FIELD_DECL for a capture pack. */ && TREE_CODE (e1->tmpl) == TEMPLATE_DECL && VAR_P (DECL_TEMPLATE_RESULT (e1->tmpl)) && uses_template_parms (e1->args)) { /* Partial specializations of a variable template can be distinguished by constraints. */ tree c1 = e1->spec ? get_constraints (e1->spec) : NULL_TREE; tree c2 = e2->spec ? get_constraints (e2->spec) : NULL_TREE; equal = equivalent_constraints (c1, c2); } --comparing_specializations; return equal; } /* Returns a hash for a template TMPL and template arguments ARGS. */ static hashval_t hash_tmpl_and_args (tree tmpl, tree args) { hashval_t val = iterative_hash_object (DECL_UID (tmpl), 0); return iterative_hash_template_arg (args, val); } /* Returns a hash for a spec_entry node based on the TMPL and ARGS members, ignoring SPEC. */ hashval_t spec_hasher::hash (spec_entry *e) { return hash_tmpl_and_args (e->tmpl, e->args); } /* Recursively calculate a hash value for a template argument ARG, for use in the hash tables of template specializations. We must be careful to (at least) skip the same entities template_args_equal does. */ hashval_t iterative_hash_template_arg (tree arg, hashval_t val) { if (arg == NULL_TREE) return iterative_hash_object (arg, val); if (!TYPE_P (arg)) /* Strip nop-like things, but not the same as STRIP_NOPS. 
*/ while (CONVERT_EXPR_P (arg) || TREE_CODE (arg) == NON_LVALUE_EXPR || class_nttp_const_wrapper_p (arg)) arg = TREE_OPERAND (arg, 0); enum tree_code code = TREE_CODE (arg); val = iterative_hash_object (code, val); switch (code) { case ARGUMENT_PACK_SELECT: gcc_unreachable (); case ERROR_MARK: return val; case IDENTIFIER_NODE: return iterative_hash_object (IDENTIFIER_HASH_VALUE (arg), val); case TREE_VEC: for (int i = 0, len = TREE_VEC_LENGTH (arg); i < len; ++i) val = iterative_hash_template_arg (TREE_VEC_ELT (arg, i), val); return val; case TYPE_PACK_EXPANSION: case EXPR_PACK_EXPANSION: val = iterative_hash_template_arg (PACK_EXPANSION_PATTERN (arg), val); return iterative_hash_template_arg (PACK_EXPANSION_EXTRA_ARGS (arg), val); case TYPE_ARGUMENT_PACK: case NONTYPE_ARGUMENT_PACK: return iterative_hash_template_arg (ARGUMENT_PACK_ARGS (arg), val); case TREE_LIST: for (; arg; arg = TREE_CHAIN (arg)) val = iterative_hash_template_arg (TREE_VALUE (arg), val); return val; case OVERLOAD: for (lkp_iterator iter (arg); iter; ++iter) val = iterative_hash_template_arg (*iter, val); return val; case CONSTRUCTOR: { tree field, value; unsigned i; iterative_hash_template_arg (TREE_TYPE (arg), val); FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (arg), i, field, value) { val = iterative_hash_template_arg (field, val); val = iterative_hash_template_arg (value, val); } return val; } case PARM_DECL: if (!DECL_ARTIFICIAL (arg)) { val = iterative_hash_object (DECL_PARM_INDEX (arg), val); val = iterative_hash_object (DECL_PARM_LEVEL (arg), val); } return iterative_hash_template_arg (TREE_TYPE (arg), val); case TARGET_EXPR: return iterative_hash_template_arg (TARGET_EXPR_INITIAL (arg), val); case PTRMEM_CST: val = iterative_hash_template_arg (PTRMEM_CST_CLASS (arg), val); return iterative_hash_template_arg (PTRMEM_CST_MEMBER (arg), val); case TEMPLATE_PARM_INDEX: val = iterative_hash_template_arg (TREE_TYPE (TEMPLATE_PARM_DECL (arg)), val); val = iterative_hash_object (TEMPLATE_PARM_LEVEL (arg), val); return iterative_hash_object (TEMPLATE_PARM_IDX (arg), val); case TRAIT_EXPR: val = iterative_hash_object (TRAIT_EXPR_KIND (arg), val); val = iterative_hash_template_arg (TRAIT_EXPR_TYPE1 (arg), val); return iterative_hash_template_arg (TRAIT_EXPR_TYPE2 (arg), val); case BASELINK: val = iterative_hash_template_arg (BINFO_TYPE (BASELINK_BINFO (arg)), val); return iterative_hash_template_arg (DECL_NAME (get_first_fn (arg)), val); case MODOP_EXPR: val = iterative_hash_template_arg (TREE_OPERAND (arg, 0), val); code = TREE_CODE (TREE_OPERAND (arg, 1)); val = iterative_hash_object (code, val); return iterative_hash_template_arg (TREE_OPERAND (arg, 2), val); case LAMBDA_EXPR: /* [temp.over.link] Two lambda-expressions are never considered equivalent. So just hash the closure type. */ return iterative_hash_template_arg (TREE_TYPE (arg), val); case CAST_EXPR: case IMPLICIT_CONV_EXPR: case STATIC_CAST_EXPR: case REINTERPRET_CAST_EXPR: case CONST_CAST_EXPR: case DYNAMIC_CAST_EXPR: case NEW_EXPR: val = iterative_hash_template_arg (TREE_TYPE (arg), val); /* Now hash operands as usual. 
*/ break; case CALL_EXPR: { tree fn = CALL_EXPR_FN (arg); if (tree name = dependent_name (fn)) { if (TREE_CODE (fn) == TEMPLATE_ID_EXPR) val = iterative_hash_template_arg (TREE_OPERAND (fn, 1), val); fn = name; } val = iterative_hash_template_arg (fn, val); call_expr_arg_iterator ai; for (tree x = first_call_expr_arg (arg, &ai); x; x = next_call_expr_arg (&ai)) val = iterative_hash_template_arg (x, val); return val; } default: break; } char tclass = TREE_CODE_CLASS (code); switch (tclass) { case tcc_type: if (tree ats = alias_template_specialization_p (arg, nt_transparent)) { // We want an alias specialization that survived strip_typedefs // to hash differently from its TYPE_CANONICAL, to avoid hash // collisions that compare as different in template_args_equal. // These could be dependent specializations that strip_typedefs // left alone, or untouched specializations because // coerce_template_parms returns the unconverted template // arguments if it sees incomplete argument packs. tree ti = TYPE_ALIAS_TEMPLATE_INFO (ats); return hash_tmpl_and_args (TI_TEMPLATE (ti), TI_ARGS (ti)); } switch (TREE_CODE (arg)) { case TEMPLATE_TEMPLATE_PARM: { tree tpi = TEMPLATE_TYPE_PARM_INDEX (arg); /* Do not recurse with TPI directly, as that is unbounded recursion. */ val = iterative_hash_object (TEMPLATE_PARM_LEVEL (tpi), val); val = iterative_hash_object (TEMPLATE_PARM_IDX (tpi), val); } break; case DECLTYPE_TYPE: val = iterative_hash_template_arg (DECLTYPE_TYPE_EXPR (arg), val); break; default: if (tree canonical = TYPE_CANONICAL (arg)) val = iterative_hash_object (TYPE_HASH (canonical), val); break; } return val; case tcc_declaration: case tcc_constant: return iterative_hash_expr (arg, val); default: gcc_assert (IS_EXPR_CODE_CLASS (tclass)); for (int i = 0, n = cp_tree_operand_length (arg); i < n; ++i) val = iterative_hash_template_arg (TREE_OPERAND (arg, i), val); return val; } gcc_unreachable (); return 0; } /* Unregister the specialization SPEC as a specialization of TMPL. Replace it with NEW_SPEC, if NEW_SPEC is non-NULL. Returns true if the SPEC was listed as a specialization of TMPL. Note that SPEC has been ggc_freed, so we can't look inside it. */ bool reregister_specialization (tree spec, tree tinfo, tree new_spec) { spec_entry *entry; spec_entry elt; elt.tmpl = most_general_template (TI_TEMPLATE (tinfo)); elt.args = TI_ARGS (tinfo); elt.spec = NULL_TREE; entry = decl_specializations->find (&elt); if (entry != NULL) { gcc_assert (entry->spec == spec || entry->spec == new_spec); gcc_assert (new_spec != NULL_TREE); entry->spec = new_spec; return 1; } return 0; } /* Like register_specialization, but for local declarations. We are registering SPEC, an instantiation of TMPL. */ void register_local_specialization (tree spec, tree tmpl) { gcc_assert (tmpl != spec); local_specializations->put (tmpl, spec); } /* TYPE is a class type. Returns true if TYPE is an explicitly specialized class. */ bool explicit_class_specialization_p (tree type) { if (!CLASSTYPE_TEMPLATE_SPECIALIZATION (type)) return false; return !uses_template_parms (CLASSTYPE_TI_ARGS (type)); } /* Print the list of functions at FNS, going through all the overloads for each element of the list. Alternatively, FNS cannot be a TREE_LIST, in which case it will be printed together with all the overloads. MORE and *STR should respectively be FALSE and NULL when the function is called from the outside. They are used internally on recursive calls. print_candidates manages the two parameters and leaves NULL in *STR when it ends. 
*/ static void print_candidates_1 (tree fns, char **str, bool more = false) { if (TREE_CODE (fns) == TREE_LIST) for (; fns; fns = TREE_CHAIN (fns)) print_candidates_1 (TREE_VALUE (fns), str, more || TREE_CHAIN (fns)); else for (lkp_iterator iter (fns); iter;) { tree cand = *iter; ++iter; const char *pfx = *str; if (!pfx) { if (more || iter) pfx = _("candidates are:"); else pfx = _("candidate is:"); *str = get_spaces (pfx); } inform (DECL_SOURCE_LOCATION (cand), "%s %#qD", pfx, cand); } } /* Print the list of candidate FNS in an error message. FNS can also be a TREE_LIST of non-functions in the case of an ambiguous lookup. */ void print_candidates (tree fns) { char *str = NULL; print_candidates_1 (fns, &str); free (str); } /* Get a (possibly) constrained template declaration for the purpose of ordering candidates. */ static tree get_template_for_ordering (tree list) { gcc_assert (TREE_CODE (list) == TREE_LIST); tree f = TREE_VALUE (list); if (tree ti = DECL_TEMPLATE_INFO (f)) return TI_TEMPLATE (ti); return f; } /* Among candidates having the same signature, return the most constrained or NULL_TREE if there is no best candidate. If the signatures of candidates vary (e.g., template specialization vs. member function), then there can be no most constrained. Note that we don't compare constraints on the functions themselves, but rather those of their templates. */ static tree most_constrained_function (tree candidates) { // Try to find the best candidate in a first pass. tree champ = candidates; for (tree c = TREE_CHAIN (champ); c; c = TREE_CHAIN (c)) { int winner = more_constrained (get_template_for_ordering (champ), get_template_for_ordering (c)); if (winner == -1) champ = c; // The candidate is more constrained else if (winner == 0) return NULL_TREE; // Neither is more constrained } // Verify that the champ is better than previous candidates. for (tree c = candidates; c != champ; c = TREE_CHAIN (c)) { if (!more_constrained (get_template_for_ordering (champ), get_template_for_ordering (c))) return NULL_TREE; } return champ; } /* Returns the template (one of the functions given by TEMPLATE_ID) which can be specialized to match the indicated DECL with the explicit template args given in TEMPLATE_ID. The DECL may be NULL_TREE if none is available. In that case, the functions in TEMPLATE_ID are non-members. If NEED_MEMBER_TEMPLATE is nonzero the function is known to be a specialization of a member template. The TEMPLATE_COUNT is the number of references to qualifying template classes that appeared in the name of the function. See check_explicit_specialization for a more accurate description. TSK indicates what kind of template declaration (if any) is being declared. TSK_TEMPLATE indicates that the declaration given by DECL, though a FUNCTION_DECL, has template parameters, and is therefore a template function. The template args (those explicitly specified and those deduced) are output in a newly created vector *TARGS_OUT. If it is impossible to determine the result, an error message is issued. The error_mark_node is returned to indicate failure. */ static tree determine_specialization (tree template_id, tree decl, tree* targs_out, int need_member_template, int template_count, tmpl_spec_kind tsk) { tree fns; tree targs; tree explicit_targs; tree candidates = NULL_TREE; /* A TREE_LIST of templates of which DECL may be a specialization. The TREE_VALUE of each node is a TEMPLATE_DECL. 
The corresponding TREE_PURPOSE is the set of template arguments that, when used to instantiate the template, would produce a function with the signature of DECL. */ tree templates = NULL_TREE; int header_count; cp_binding_level *b; *targs_out = NULL_TREE; if (template_id == error_mark_node || decl == error_mark_node) return error_mark_node; /* We shouldn't be specializing a member template of an unspecialized class template; we already gave an error in check_specialization_scope, now avoid crashing. */ if (!VAR_P (decl) && template_count && DECL_CLASS_SCOPE_P (decl) && template_class_depth (DECL_CONTEXT (decl)) > 0) { gcc_assert (errorcount); return error_mark_node; } fns = TREE_OPERAND (template_id, 0); explicit_targs = TREE_OPERAND (template_id, 1); if (fns == error_mark_node) return error_mark_node; /* Check for baselinks. */ if (BASELINK_P (fns)) fns = BASELINK_FUNCTIONS (fns); if (TREE_CODE (decl) == FUNCTION_DECL && !is_overloaded_fn (fns)) { error_at (DECL_SOURCE_LOCATION (decl), "%qD is not a function template", fns); return error_mark_node; } else if (VAR_P (decl) && !variable_template_p (fns)) { error ("%qD is not a variable template", fns); return error_mark_node; } /* Count the number of template headers specified for this specialization. */ header_count = 0; for (b = current_binding_level; b->kind == sk_template_parms; b = b->level_chain) ++header_count; tree orig_fns = fns; if (variable_template_p (fns)) { tree parms = INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (fns)); targs = coerce_template_parms (parms, explicit_targs, fns, tf_warning_or_error, /*req_all*/true, /*use_defarg*/true); if (targs != error_mark_node) templates = tree_cons (targs, fns, templates); } else for (lkp_iterator iter (fns); iter; ++iter) { tree fn = *iter; if (TREE_CODE (fn) == TEMPLATE_DECL) { tree decl_arg_types; tree fn_arg_types; tree insttype; /* In case of explicit specialization, we need to check if the number of template headers appearing in the specialization is correct. This is usually done in check_explicit_specialization, but the check done there cannot be exhaustive when specializing member functions. Consider the following code: template <> void A<int>::f(int); template <> template <> void A<int>::f(int); Assuming that A<int> is not itself an explicit specialization already, the first line specializes "f" which is a non-template member function, whilst the second line specializes "f" which is a template member function. So both lines are syntactically correct, and check_explicit_specialization does not reject them. Here, we can do better, as we are matching the specialization against the declarations. We count the number of template headers, and we check if they match TEMPLATE_COUNT + 1 (TEMPLATE_COUNT is the number of qualifying template classes, plus there must be another header for the member template itself). Notice that if header_count is zero, this is not a specialization but rather a template instantiation, so there is no check we can perform here. */ if (header_count && header_count != template_count + 1) continue; /* Check that the number of template arguments at the innermost level for DECL is the same as for FN. */ if (current_binding_level->kind == sk_template_parms && !current_binding_level->explicit_spec_p && (TREE_VEC_LENGTH (DECL_INNERMOST_TEMPLATE_PARMS (fn)) != TREE_VEC_LENGTH (INNERMOST_TEMPLATE_PARMS (current_template_parms)))) continue; /* DECL might be a specialization of FN. 
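   For illustration (a hypothetical translation unit, not taken from
   the sources above), the situation being tested is:

     template <class T> void f (T);     // FN, a TEMPLATE_DECL
     template <> void f<int> (int);     // DECL, possibly specializing FN

   The checks that follow compare DECL's parameter types (and, via
   get_bindings, its return type) against those obtained by
   substituting the deduced arguments into FN.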
*/ decl_arg_types = TYPE_ARG_TYPES (TREE_TYPE (decl)); fn_arg_types = TYPE_ARG_TYPES (TREE_TYPE (fn)); /* For a non-static member function, we need to make sure that the const qualification is the same. Since get_bindings does not try to merge the "this" parameter, we must do the comparison explicitly. */ if (DECL_NONSTATIC_MEMBER_FUNCTION_P (fn)) { if (!same_type_p (TREE_VALUE (fn_arg_types), TREE_VALUE (decl_arg_types))) continue; /* And the ref-qualification. */ if (type_memfn_rqual (TREE_TYPE (decl)) != type_memfn_rqual (TREE_TYPE (fn))) continue; } /* Skip the "this" parameter and, for constructors of classes with virtual bases, the VTT parameter. A full specialization of a constructor will have a VTT parameter, but a template never will. */ decl_arg_types = skip_artificial_parms_for (decl, decl_arg_types); fn_arg_types = skip_artificial_parms_for (fn, fn_arg_types); /* Function templates cannot be specializations; there are no partial specializations of functions. Therefore, if the type of DECL does not match FN, there is no match. Note that it should never be the case that we have both candidates added here, and for regular member functions below. */ if (tsk == tsk_template) { if (compparms (fn_arg_types, decl_arg_types)) candidates = tree_cons (NULL_TREE, fn, candidates); continue; } /* See whether this function might be a specialization of this template. Suppress access control because we might be trying to make this specialization a friend, and we have already done access control for the declaration of the specialization. */ push_deferring_access_checks (dk_no_check); targs = get_bindings (fn, decl, explicit_targs, /*check_ret=*/true); pop_deferring_access_checks (); if (!targs) /* We cannot deduce template arguments that when used to specialize TMPL will produce DECL. */ continue; if (uses_template_parms (targs)) /* We deduced something involving 'auto', which isn't a valid template argument. */ continue; /* Remove, from the set of candidates, all those functions whose constraints are not satisfied. */ if (flag_concepts && !constraints_satisfied_p (fn, targs)) continue; // Then, try to form the new function type. insttype = tsubst (TREE_TYPE (fn), targs, tf_fndecl_type, NULL_TREE); if (insttype == error_mark_node) continue; fn_arg_types = skip_artificial_parms_for (fn, TYPE_ARG_TYPES (insttype)); if (!compparms (fn_arg_types, decl_arg_types)) continue; /* Save this template, and the arguments deduced. */ templates = tree_cons (targs, fn, templates); } else if (need_member_template) /* FN is an ordinary member function, and we need a specialization of a member template. */ ; else if (TREE_CODE (fn) != FUNCTION_DECL) /* We can get IDENTIFIER_NODEs here in certain erroneous cases. */ ; else if (!DECL_FUNCTION_MEMBER_P (fn)) /* This is just an ordinary non-member function. Nothing can be a specialization of that. */ ; else if (DECL_ARTIFICIAL (fn)) /* Cannot specialize functions that are created implicitly. */ ; else { tree decl_arg_types; /* This is an ordinary member function. However, since we're here, we can assume its enclosing class is a template class. For example, template <typename T> struct S { void f(); }; template <> void S<int>::f() {} Here, S<int>::f is a non-template, but S<int> is a template class. If FN has the same type as DECL, we might be in business. */ if (!DECL_TEMPLATE_INFO (fn)) /* Its enclosing class is an explicit specialization of a template class. This is not a candidate. 
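   For instance (a hypothetical example):

     template <class T> struct S { void f (); };
     template <> struct S<int> { void f (); };  // explicit specialization
     template <> void S<int>::f () {}           // cannot match this FN

   S<int>::f carries no template information, so it cannot be the
   subject of an explicit specialization and is skipped here.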
*/ continue; if (!same_type_p (TREE_TYPE (TREE_TYPE (decl)), TREE_TYPE (TREE_TYPE (fn)))) /* The return types differ. */ continue; /* Adjust the type of DECL in case FN is a static member. */ decl_arg_types = TYPE_ARG_TYPES (TREE_TYPE (decl)); if (DECL_STATIC_FUNCTION_P (fn) && DECL_NONSTATIC_MEMBER_FUNCTION_P (decl)) decl_arg_types = TREE_CHAIN (decl_arg_types); if (!compparms (TYPE_ARG_TYPES (TREE_TYPE (fn)), decl_arg_types)) continue; if (DECL_NONSTATIC_MEMBER_FUNCTION_P (fn) && (type_memfn_rqual (TREE_TYPE (decl)) != type_memfn_rqual (TREE_TYPE (fn)))) continue; // If the deduced arguments do not satisfy the constraints, // this is not a candidate. if (flag_concepts && !constraints_satisfied_p (fn)) continue; // Add the candidate. candidates = tree_cons (NULL_TREE, fn, candidates); } } if (templates && TREE_CHAIN (templates)) { /* We have: [temp.expl.spec] It is possible for a specialization with a given function signature to be instantiated from more than one function template. In such cases, explicit specification of the template arguments must be used to uniquely identify the function template specialization being specialized. Note that here, there's no suggestion that we're supposed to determine which of the candidate templates is most specialized. However, we, also have: [temp.func.order] Partial ordering of overloaded function template declarations is used in the following contexts to select the function template to which a function template specialization refers: -- when an explicit specialization refers to a function template. So, we do use the partial ordering rules, at least for now. This extension can only serve to make invalid programs valid, so it's safe. And, there is strong anecdotal evidence that the committee intended the partial ordering rules to apply; the EDG front end has that behavior, and John Spicer claims that the committee simply forgot to delete the wording in [temp.expl.spec]. */ tree tmpl = most_specialized_instantiation (templates); if (tmpl != error_mark_node) { templates = tmpl; TREE_CHAIN (templates) = NULL_TREE; } } // Concepts allows multiple declarations of member functions // with the same signature. Like above, we need to rely on // on the partial ordering of those candidates to determine which // is the best. if (flag_concepts && candidates && TREE_CHAIN (candidates)) { if (tree cand = most_constrained_function (candidates)) { candidates = cand; TREE_CHAIN (cand) = NULL_TREE; } } if (templates == NULL_TREE && candidates == NULL_TREE) { error ("template-id %qD for %q+D does not match any template " "declaration", template_id, decl); if (header_count && header_count != template_count + 1) inform (DECL_SOURCE_LOCATION (decl), "saw %d %<template<>%>, need %d for " "specializing a member function template", header_count, template_count + 1); else print_candidates (orig_fns); return error_mark_node; } else if ((templates && TREE_CHAIN (templates)) || (candidates && TREE_CHAIN (candidates)) || (templates && candidates)) { error ("ambiguous template specialization %qD for %q+D", template_id, decl); candidates = chainon (candidates, templates); print_candidates (candidates); return error_mark_node; } /* We have one, and exactly one, match. */ if (candidates) { tree fn = TREE_VALUE (candidates); *targs_out = copy_node (DECL_TI_ARGS (fn)); /* Propagate the candidate's constraints to the declaration. */ set_constraints (decl, get_constraints (fn)); /* DECL is a re-declaration or partial instantiation of a template function. 
*/ if (TREE_CODE (fn) == TEMPLATE_DECL) return fn; /* It was a specialization of an ordinary member function in a template class. */ return DECL_TI_TEMPLATE (fn); } /* It was a specialization of a template. */ targs = DECL_TI_ARGS (DECL_TEMPLATE_RESULT (TREE_VALUE (templates))); if (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (targs)) { *targs_out = copy_node (targs); SET_TMPL_ARGS_LEVEL (*targs_out, TMPL_ARGS_DEPTH (*targs_out), TREE_PURPOSE (templates)); } else *targs_out = TREE_PURPOSE (templates); return TREE_VALUE (templates); } /* Returns a chain of parameter types, exactly like the SPEC_TYPES, but with the default argument values filled in from those in the TMPL_TYPES. */ static tree copy_default_args_to_explicit_spec_1 (tree spec_types, tree tmpl_types) { tree new_spec_types; if (!spec_types) return NULL_TREE; if (spec_types == void_list_node) return void_list_node; /* Substitute into the rest of the list. */ new_spec_types = copy_default_args_to_explicit_spec_1 (TREE_CHAIN (spec_types), TREE_CHAIN (tmpl_types)); /* Add the default argument for this parameter. */ return hash_tree_cons (TREE_PURPOSE (tmpl_types), TREE_VALUE (spec_types), new_spec_types); } /* DECL is an explicit specialization. Replicate default arguments from the template it specializes. (That way, code like: template <class T> void f(T = 3); template <> void f(double); void g () { f (); } works, as required.) An alternative approach would be to look up the correct default arguments at the call-site, but this approach is consistent with how implicit instantiations are handled. */ static void copy_default_args_to_explicit_spec (tree decl) { tree tmpl; tree spec_types; tree tmpl_types; tree new_spec_types; tree old_type; tree new_type; tree t; tree object_type = NULL_TREE; tree in_charge = NULL_TREE; tree vtt = NULL_TREE; /* See if there's anything we need to do. */ tmpl = DECL_TI_TEMPLATE (decl); tmpl_types = TYPE_ARG_TYPES (TREE_TYPE (DECL_TEMPLATE_RESULT (tmpl))); for (t = tmpl_types; t; t = TREE_CHAIN (t)) if (TREE_PURPOSE (t)) break; if (!t) return; old_type = TREE_TYPE (decl); spec_types = TYPE_ARG_TYPES (old_type); if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl)) { /* Remove the this pointer, but remember the object's type for CV quals. */ object_type = TREE_TYPE (TREE_VALUE (spec_types)); spec_types = TREE_CHAIN (spec_types); tmpl_types = TREE_CHAIN (tmpl_types); if (DECL_HAS_IN_CHARGE_PARM_P (decl)) { /* DECL may contain more parameters than TMPL due to the extra in-charge parameter in constructors and destructors. */ in_charge = spec_types; spec_types = TREE_CHAIN (spec_types); } if (DECL_HAS_VTT_PARM_P (decl)) { vtt = spec_types; spec_types = TREE_CHAIN (spec_types); } } /* Compute the merged default arguments. */ new_spec_types = copy_default_args_to_explicit_spec_1 (spec_types, tmpl_types); /* Compute the new FUNCTION_TYPE. */ if (object_type) { if (vtt) new_spec_types = hash_tree_cons (TREE_PURPOSE (vtt), TREE_VALUE (vtt), new_spec_types); if (in_charge) /* Put the in-charge parameter back. 
*/ new_spec_types = hash_tree_cons (TREE_PURPOSE (in_charge), TREE_VALUE (in_charge), new_spec_types); new_type = build_method_type_directly (object_type, TREE_TYPE (old_type), new_spec_types); } else new_type = build_function_type (TREE_TYPE (old_type), new_spec_types); new_type = cp_build_type_attribute_variant (new_type, TYPE_ATTRIBUTES (old_type)); new_type = cxx_copy_lang_qualifiers (new_type, old_type); TREE_TYPE (decl) = new_type; } /* Return the number of template headers we expect to see for a definition or specialization of CTYPE or one of its non-template members. */ int num_template_headers_for_class (tree ctype) { int num_templates = 0; while (ctype && CLASS_TYPE_P (ctype)) { /* You're supposed to have one `template <...>' for every template class, but you don't need one for a full specialization. For example: template <class T> struct S{}; template <> struct S<int> { void f(); }; void S<int>::f () {} is correct; there shouldn't be a `template <>' for the definition of `S<int>::f'. */ if (!CLASSTYPE_TEMPLATE_INFO (ctype)) /* If CTYPE does not have template information of any kind, then it is not a template, nor is it nested within a template. */ break; if (explicit_class_specialization_p (ctype)) break; if (PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (ctype))) ++num_templates; ctype = TYPE_CONTEXT (ctype); } return num_templates; } /* Do a simple sanity check on the template headers that precede the variable declaration DECL. */ void check_template_variable (tree decl) { tree ctx = CP_DECL_CONTEXT (decl); int wanted = num_template_headers_for_class (ctx); if (DECL_LANG_SPECIFIC (decl) && DECL_TEMPLATE_INFO (decl) && PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (decl))) { if (cxx_dialect < cxx14) pedwarn (DECL_SOURCE_LOCATION (decl), 0, "variable templates only available with " "%<-std=c++14%> or %<-std=gnu++14%>"); // Namespace-scope variable templates should have a template header. ++wanted; } if (template_header_count > wanted) { auto_diagnostic_group d; bool warned = pedwarn (DECL_SOURCE_LOCATION (decl), 0, "too many template headers for %qD " "(should be %d)", decl, wanted); if (warned && CLASS_TYPE_P (ctx) && CLASSTYPE_TEMPLATE_SPECIALIZATION (ctx)) inform (DECL_SOURCE_LOCATION (decl), "members of an explicitly specialized class are defined " "without a template header"); } } /* An explicit specialization whose declarator-id or class-head-name is not qualified shall be declared in the nearest enclosing namespace of the template, or, if the namespace is inline (7.3.1), any namespace from its enclosing namespace set. If the name declared in the explicit instantiation is an unqualified name, the explicit instantiation shall appear in the namespace where its template is declared or, if that namespace is inline (7.3.1), any namespace from its enclosing namespace set. */ void check_unqualified_spec_or_inst (tree t, location_t loc) { tree tmpl = most_general_template (t); if (DECL_NAMESPACE_SCOPE_P (tmpl) && !is_nested_namespace (current_namespace, CP_DECL_CONTEXT (tmpl), true)) { if (processing_specialization) permerror (loc, "explicit specialization of %qD outside its " "namespace must use a nested-name-specifier", tmpl); else if (processing_explicit_instantiation && cxx_dialect >= cxx11) /* This was allowed in C++98, so only pedwarn. 
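   A hypothetical example of the construct diagnosed below:

     namespace N { template <class T> void f (T) {} }
     using namespace N;
     template void f<int> (int);  // unqualified, outside namespace N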
*/ pedwarn (loc, OPT_Wpedantic, "explicit instantiation of %qD " "outside its namespace must use a nested-name-" "specifier", tmpl); } } /* Warn for a template specialization SPEC that is missing some of a set of function or type attributes that the template TEMPL is declared with. ATTRLIST is a list of additional attributes that SPEC should be taken to ultimately be declared with. */ static void warn_spec_missing_attributes (tree tmpl, tree spec, tree attrlist) { if (DECL_FUNCTION_TEMPLATE_P (tmpl)) tmpl = DECL_TEMPLATE_RESULT (tmpl); /* Avoid warning if the difference between the primary and the specialization is not in one of the attributes below. */ const char* const blacklist[] = { "alloc_align", "alloc_size", "assume_aligned", "format", "format_arg", "malloc", "nonnull", NULL }; /* Put together a list of the black listed attributes that the primary template is declared with that the specialization is not, in case it's not apparent from the most recent declaration of the primary. */ pretty_printer str; unsigned nattrs = decls_mismatched_attributes (tmpl, spec, attrlist, blacklist, &str); if (!nattrs) return; auto_diagnostic_group d; if (warning_at (DECL_SOURCE_LOCATION (spec), OPT_Wmissing_attributes, "explicit specialization %q#D may be missing attributes", spec)) inform (DECL_SOURCE_LOCATION (tmpl), nattrs > 1 ? G_("missing primary template attributes %s") : G_("missing primary template attribute %s"), pp_formatted_text (&str)); } /* Check to see if the function just declared, as indicated in DECLARATOR, and in DECL, is a specialization of a function template. We may also discover that the declaration is an explicit instantiation at this point. Returns DECL, or an equivalent declaration that should be used instead if all goes well. Issues an error message if something is amiss. Returns error_mark_node if the error is not easily recoverable. FLAGS is a bitmask consisting of the following flags: 2: The function has a definition. 4: The function is a friend. The TEMPLATE_COUNT is the number of references to qualifying template classes that appeared in the name of the function. For example, in template <class T> struct S { void f(); }; void S<int>::f(); the TEMPLATE_COUNT would be 1. However, explicitly specialized classes are not counted in the TEMPLATE_COUNT, so that in template <class T> struct S {}; template <> struct S<int> { void f(); } template <> void S<int>::f(); the TEMPLATE_COUNT would be 0. (Note that this declaration is invalid; there should be no template <>.) If the function is a specialization, it is marked as such via DECL_TEMPLATE_SPECIALIZATION. Furthermore, its DECL_TEMPLATE_INFO is set up correctly, and it is added to the list of specializations for that template. 
*/ tree check_explicit_specialization (tree declarator, tree decl, int template_count, int flags, tree attrlist) { int have_def = flags & 2; int is_friend = flags & 4; bool is_concept = flags & 8; int specialization = 0; int explicit_instantiation = 0; int member_specialization = 0; tree ctype = DECL_CLASS_CONTEXT (decl); tree dname = DECL_NAME (decl); tmpl_spec_kind tsk; if (is_friend) { if (!processing_specialization) tsk = tsk_none; else tsk = tsk_excessive_parms; } else tsk = current_tmpl_spec_kind (template_count); switch (tsk) { case tsk_none: if (processing_specialization && !VAR_P (decl)) { specialization = 1; SET_DECL_TEMPLATE_SPECIALIZATION (decl); } else if (TREE_CODE (declarator) == TEMPLATE_ID_EXPR) { if (is_friend) /* This could be something like: template <class T> void f(T); class S { friend void f<>(int); } */ specialization = 1; else { /* This case handles bogus declarations like template <> template <class T> void f<int>(); */ error_at (cp_expr_loc_or_input_loc (declarator), "template-id %qE in declaration of primary template", declarator); return decl; } } break; case tsk_invalid_member_spec: /* The error has already been reported in check_specialization_scope. */ return error_mark_node; case tsk_invalid_expl_inst: error ("template parameter list used in explicit instantiation"); /* Fall through. */ case tsk_expl_inst: if (have_def) error ("definition provided for explicit instantiation"); explicit_instantiation = 1; break; case tsk_excessive_parms: case tsk_insufficient_parms: if (tsk == tsk_excessive_parms) error ("too many template parameter lists in declaration of %qD", decl); else if (template_header_count) error("too few template parameter lists in declaration of %qD", decl); else error("explicit specialization of %qD must be introduced by " "%<template <>%>", decl); /* Fall through. */ case tsk_expl_spec: if (is_concept) error ("explicit specialization declared %<concept%>"); if (VAR_P (decl) && TREE_CODE (declarator) != TEMPLATE_ID_EXPR) /* In cases like template<> constexpr bool v = true; We'll give an error in check_template_variable. */ break; SET_DECL_TEMPLATE_SPECIALIZATION (decl); if (ctype) member_specialization = 1; else specialization = 1; break; case tsk_template: if (TREE_CODE (declarator) == TEMPLATE_ID_EXPR) { /* This case handles bogus declarations like template <> template <class T> void f<int>(); */ if (!uses_template_parms (TREE_OPERAND (declarator, 1))) error_at (cp_expr_loc_or_input_loc (declarator), "template-id %qE in declaration of primary template", declarator); else if (variable_template_p (TREE_OPERAND (declarator, 0))) { /* Partial specialization of variable template. */ SET_DECL_TEMPLATE_SPECIALIZATION (decl); specialization = 1; goto ok; } else if (cxx_dialect < cxx14) error_at (cp_expr_loc_or_input_loc (declarator), "non-type partial specialization %qE " "is not allowed", declarator); else error_at (cp_expr_loc_or_input_loc (declarator), "non-class, non-variable partial specialization %qE " "is not allowed", declarator); return decl; ok:; } if (ctype && CLASSTYPE_TEMPLATE_INSTANTIATION (ctype)) /* This is a specialization of a member template, without specialization the containing class. Something like: template <class T> struct S { template <class U> void f (U); }; template <> template <class U> void S<int>::f(U) {} That's a specialization -- but of the entire template. */ specialization = 1; break; default: gcc_unreachable (); } if ((specialization || member_specialization) /* This doesn't apply to variable templates. 
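   The check below rejects default arguments in an explicit
   specialization, as in this hypothetical example:

     template <class T> void f (T);
     template <> void f<int> (int i = 42);  // default argument rejected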
*/ && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (decl))) { tree t = TYPE_ARG_TYPES (TREE_TYPE (decl)); for (; t; t = TREE_CHAIN (t)) if (TREE_PURPOSE (t)) { permerror (input_location, "default argument specified in explicit specialization"); break; } } if (specialization || member_specialization || explicit_instantiation) { tree tmpl = NULL_TREE; tree targs = NULL_TREE; bool was_template_id = (TREE_CODE (declarator) == TEMPLATE_ID_EXPR); /* Make sure that the declarator is a TEMPLATE_ID_EXPR. */ if (!was_template_id) { tree fns; gcc_assert (identifier_p (declarator)); if (ctype) fns = dname; else { /* If there is no class context, the explicit instantiation must be at namespace scope. */ gcc_assert (DECL_NAMESPACE_SCOPE_P (decl)); /* Find the namespace binding, using the declaration context. */ fns = lookup_qualified_name (CP_DECL_CONTEXT (decl), dname, false, true); if (fns == error_mark_node) /* If lookup fails, look for a friend declaration so we can give a better diagnostic. */ fns = lookup_qualified_name (CP_DECL_CONTEXT (decl), dname, /*type*/false, /*complain*/true, /*hidden*/true); if (fns == error_mark_node || !is_overloaded_fn (fns)) { error ("%qD is not a template function", dname); fns = error_mark_node; } } declarator = lookup_template_function (fns, NULL_TREE); } if (declarator == error_mark_node) return error_mark_node; if (ctype != NULL_TREE && TYPE_BEING_DEFINED (ctype)) { if (!explicit_instantiation) /* A specialization in class scope. This is invalid, but the error will already have been flagged by check_specialization_scope. */ return error_mark_node; else { /* It's not valid to write an explicit instantiation in class scope, e.g.: class C { template void f(); } This case is caught by the parser. However, on something like: template class C { void f(); }; (which is invalid) we can get here. The error will be issued later. */ ; } return decl; } else if (ctype != NULL_TREE && (identifier_p (TREE_OPERAND (declarator, 0)))) { // We'll match variable templates in start_decl. if (VAR_P (decl)) return decl; /* Find the list of functions in ctype that have the same name as the declared function. */ tree name = TREE_OPERAND (declarator, 0); if (constructor_name_p (name, ctype)) { if (DECL_CONSTRUCTOR_P (decl) ? !TYPE_HAS_USER_CONSTRUCTOR (ctype) : !CLASSTYPE_DESTRUCTOR (ctype)) { /* From [temp.expl.spec]: If such an explicit specialization for the member of a class template names an implicitly-declared special member function (clause _special_), the program is ill-formed. Similar language is found in [temp.explicit]. */ error ("specialization of implicitly-declared special member function"); return error_mark_node; } name = DECL_NAME (decl); } /* For a type-conversion operator, We might be looking for `operator int' which will be a specialization of `operator T'. Grab all the conversion operators, and then select from them. */ tree fns = get_class_binding (ctype, IDENTIFIER_CONV_OP_P (name) ? conv_op_identifier : name); if (fns == NULL_TREE) { error ("no member function %qD declared in %qT", name, ctype); return error_mark_node; } else TREE_OPERAND (declarator, 0) = fns; } /* Figure out what exactly is being specialized at this point. Note that for an explicit instantiation, even one for a member function, we cannot tell a priori whether the instantiation is for a member template, or just a member function of a template class. Even if a member template is being instantiated, the member template arguments may be elided if they can be deduced from the rest of the declaration. 
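   For example (hypothetical):

     template <class T> struct S {
       template <class U> void f (U);
     };
     template void S<int>::f (double);  // explicit instantiation

   Here the inner template argument for U is not written; it is
   deduced from the parameter type double.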
*/ tmpl = determine_specialization (declarator, decl, &targs, member_specialization, template_count, tsk); if (!tmpl || tmpl == error_mark_node) /* We couldn't figure out what this declaration was specializing. */ return error_mark_node; else { if (TREE_CODE (decl) == FUNCTION_DECL && DECL_HIDDEN_FRIEND_P (tmpl)) { auto_diagnostic_group d; if (pedwarn (DECL_SOURCE_LOCATION (decl), 0, "friend declaration %qD is not visible to " "explicit specialization", tmpl)) inform (DECL_SOURCE_LOCATION (tmpl), "friend declaration here"); } else if (!ctype && !is_friend && CP_DECL_CONTEXT (decl) == current_namespace) check_unqualified_spec_or_inst (tmpl, DECL_SOURCE_LOCATION (decl)); tree gen_tmpl = most_general_template (tmpl); if (explicit_instantiation) { /* We don't set DECL_EXPLICIT_INSTANTIATION here; that is done by do_decl_instantiation later. */ int arg_depth = TMPL_ARGS_DEPTH (targs); int parm_depth = TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (tmpl)); if (arg_depth > parm_depth) { /* If TMPL is not the most general template (for example, if TMPL is a friend template that is injected into namespace scope), then there will be too many levels of TARGS. Remove some of them here. */ int i; tree new_targs; new_targs = make_tree_vec (parm_depth); for (i = arg_depth - parm_depth; i < arg_depth; ++i) TREE_VEC_ELT (new_targs, i - (arg_depth - parm_depth)) = TREE_VEC_ELT (targs, i); targs = new_targs; } return instantiate_template (tmpl, targs, tf_error); } /* If we thought that the DECL was a member function, but it turns out to be specializing a static member function, make DECL a static member function as well. */ if (DECL_FUNCTION_TEMPLATE_P (tmpl) && DECL_STATIC_FUNCTION_P (tmpl) && DECL_NONSTATIC_MEMBER_FUNCTION_P (decl)) revert_static_member_fn (decl); /* If this is a specialization of a member template of a template class, we want to return the TEMPLATE_DECL, not the specialization of it. */ if (tsk == tsk_template && !was_template_id) { tree result = DECL_TEMPLATE_RESULT (tmpl); SET_DECL_TEMPLATE_SPECIALIZATION (tmpl); DECL_INITIAL (result) = NULL_TREE; if (have_def) { tree parm; DECL_SOURCE_LOCATION (tmpl) = DECL_SOURCE_LOCATION (decl); DECL_SOURCE_LOCATION (result) = DECL_SOURCE_LOCATION (decl); /* We want to use the argument list specified in the definition, not in the original declaration. */ DECL_ARGUMENTS (result) = DECL_ARGUMENTS (decl); for (parm = DECL_ARGUMENTS (result); parm; parm = DECL_CHAIN (parm)) DECL_CONTEXT (parm) = result; } return register_specialization (tmpl, gen_tmpl, targs, is_friend, 0); } /* Set up the DECL_TEMPLATE_INFO for DECL. */ DECL_TEMPLATE_INFO (decl) = build_template_info (tmpl, targs); if (was_template_id) TINFO_USED_TEMPLATE_ID (DECL_TEMPLATE_INFO (decl)) = true; /* Inherit default function arguments from the template DECL is specializing. */ if (DECL_FUNCTION_TEMPLATE_P (tmpl)) copy_default_args_to_explicit_spec (decl); /* This specialization has the same protection as the template it specializes. */ TREE_PRIVATE (decl) = TREE_PRIVATE (gen_tmpl); TREE_PROTECTED (decl) = TREE_PROTECTED (gen_tmpl); /* 7.1.1-1 [dcl.stc] A storage-class-specifier shall not be specified in an explicit specialization... The parser rejects these, so unless action is taken here, explicit function specializations will always appear with global linkage. 
The action recommended by the C++ CWG in response to C++ defect report 605 is to make the storage class and linkage of the explicit specialization match the templated function: http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#605 */ if (tsk == tsk_expl_spec && DECL_FUNCTION_TEMPLATE_P (gen_tmpl)) { tree tmpl_func = DECL_TEMPLATE_RESULT (gen_tmpl); gcc_assert (TREE_CODE (tmpl_func) == FUNCTION_DECL); /* A concept cannot be specialized. */ if (DECL_DECLARED_CONCEPT_P (tmpl_func)) { error ("explicit specialization of function concept %qD", gen_tmpl); return error_mark_node; } /* This specialization has the same linkage and visibility as the function template it specializes. */ TREE_PUBLIC (decl) = TREE_PUBLIC (tmpl_func); if (! TREE_PUBLIC (decl)) { DECL_INTERFACE_KNOWN (decl) = 1; DECL_NOT_REALLY_EXTERN (decl) = 1; } DECL_THIS_STATIC (decl) = DECL_THIS_STATIC (tmpl_func); if (DECL_VISIBILITY_SPECIFIED (tmpl_func)) { DECL_VISIBILITY_SPECIFIED (decl) = 1; DECL_VISIBILITY (decl) = DECL_VISIBILITY (tmpl_func); } } /* If DECL is a friend declaration, declared using an unqualified name, the namespace associated with DECL may have been set incorrectly. For example, in: template <typename T> void f(T); namespace N { struct S { friend void f<int>(int); } } we will have set the DECL_CONTEXT for the friend declaration to N, rather than to the global namespace. */ if (DECL_NAMESPACE_SCOPE_P (decl)) DECL_CONTEXT (decl) = DECL_CONTEXT (tmpl); if (is_friend && !have_def) /* This is not really a declaration of a specialization. It's just the name of an instantiation. But, it's not a request for an instantiation, either. */ SET_DECL_IMPLICIT_INSTANTIATION (decl); else if (TREE_CODE (decl) == FUNCTION_DECL) /* A specialization is not necessarily COMDAT. */ DECL_COMDAT (decl) = (TREE_PUBLIC (decl) && DECL_DECLARED_INLINE_P (decl)); else if (VAR_P (decl)) DECL_COMDAT (decl) = false; /* If this is a full specialization, register it so that we can find it again. Partial specializations will be registered in process_partial_specialization. */ if (!processing_template_decl) { warn_spec_missing_attributes (gen_tmpl, decl, attrlist); decl = register_specialization (decl, gen_tmpl, targs, is_friend, 0); } /* A 'structor should already have clones. */ gcc_assert (decl == error_mark_node || variable_template_p (tmpl) || !(DECL_CONSTRUCTOR_P (decl) || DECL_DESTRUCTOR_P (decl)) || DECL_CLONED_FUNCTION_P (DECL_CHAIN (decl))); } } return decl; } /* Returns 1 iff PARMS1 and PARMS2 are identical sets of template parameters. These are represented in the same format used for DECL_TEMPLATE_PARMS. */ int comp_template_parms (const_tree parms1, const_tree parms2) { const_tree p1; const_tree p2; if (parms1 == parms2) return 1; for (p1 = parms1, p2 = parms2; p1 != NULL_TREE && p2 != NULL_TREE; p1 = TREE_CHAIN (p1), p2 = TREE_CHAIN (p2)) { tree t1 = TREE_VALUE (p1); tree t2 = TREE_VALUE (p2); int i; gcc_assert (TREE_CODE (t1) == TREE_VEC); gcc_assert (TREE_CODE (t2) == TREE_VEC); if (TREE_VEC_LENGTH (t1) != TREE_VEC_LENGTH (t2)) return 0; for (i = 0; i < TREE_VEC_LENGTH (t2); ++i) { tree parm1 = TREE_VALUE (TREE_VEC_ELT (t1, i)); tree parm2 = TREE_VALUE (TREE_VEC_ELT (t2, i)); /* If either of the template parameters are invalid, assume they match for the sake of error recovery. 
*/
	  if (error_operand_p (parm1) || error_operand_p (parm2))
	    return 1;

	  if (TREE_CODE (parm1) != TREE_CODE (parm2))
	    return 0;

	  if (TREE_CODE (parm1) == TEMPLATE_TYPE_PARM
	      && (TEMPLATE_TYPE_PARAMETER_PACK (parm1)
		  == TEMPLATE_TYPE_PARAMETER_PACK (parm2)))
	    continue;
	  else if (!same_type_p (TREE_TYPE (parm1), TREE_TYPE (parm2)))
	    return 0;
	}
    }

  if ((p1 != NULL_TREE) != (p2 != NULL_TREE))
    /* One set of parameters has more parameter lists than the
       other.  */
    return 0;

  return 1;
}

/* Returns true if two template parameters are declared with
   equivalent constraints.  */

static bool
template_parameter_constraints_equivalent_p (const_tree parm1,
					     const_tree parm2)
{
  tree req1 = TREE_TYPE (parm1);
  tree req2 = TREE_TYPE (parm2);
  if (!req1 != !req2)
    return false;
  if (req1)
    return cp_tree_equal (req1, req2);
  return true;
}

/* Returns true when two template parameters are equivalent.  */

static bool
template_parameters_equivalent_p (const_tree parm1, const_tree parm2)
{
  tree decl1 = TREE_VALUE (parm1);
  tree decl2 = TREE_VALUE (parm2);

  /* If either of the template parameters are invalid, assume they
     match for the sake of error recovery.  */
  if (error_operand_p (decl1) || error_operand_p (decl2))
    return true;

  /* ... they declare parameters of the same kind.  */
  if (TREE_CODE (decl1) != TREE_CODE (decl2))
    return false;

  /* ... if one parameter was introduced by a parameter declaration,
     then both are.  This case arises as a result of eagerly rewriting
     declarations during parsing.  */
  if (DECL_VIRTUAL_P (decl1) != DECL_VIRTUAL_P (decl2))
    return false;

  /* ... if either declares a pack, they both do.  */
  if (template_parameter_pack_p (decl1) != template_parameter_pack_p (decl2))
    return false;

  if (TREE_CODE (decl1) == PARM_DECL)
    {
      /* ... if they declare non-type parameters, the types are
	 equivalent.  */
      if (!same_type_p (TREE_TYPE (decl1), TREE_TYPE (decl2)))
	return false;
    }
  else if (TREE_CODE (decl2) == TEMPLATE_DECL)
    {
      /* ... if they declare template template parameters, their
	 template parameter lists are equivalent.  */
      if (!template_heads_equivalent_p (decl1, decl2))
	return false;
    }

  /* ... if they are declared with a qualified-concept name, they both
     are, and those names are equivalent.  */
  return template_parameter_constraints_equivalent_p (parm1, parm2);
}

/* Returns true if two template parameter lists are equivalent.  Two
   template parameter lists are equivalent if they have the same length
   and their corresponding parameters are equivalent.

   PARMS1 and PARMS2 are TREE_LISTs containing TREE_VECs: the data
   structure returned by DECL_TEMPLATE_PARMS.

   This is generally the same implementation as comp_template_parms
   except that it also compares the concept names and arguments used
   to introduce parameters.  */

static bool
template_parameter_lists_equivalent_p (const_tree parms1, const_tree parms2)
{
  if (parms1 == parms2)
    return true;

  const_tree p1 = parms1;
  const_tree p2 = parms2;
  while (p1 != NULL_TREE && p2 != NULL_TREE)
    {
      tree list1 = TREE_VALUE (p1);
      tree list2 = TREE_VALUE (p2);

      if (TREE_VEC_LENGTH (list1) != TREE_VEC_LENGTH (list2))
	return false;

      for (int i = 0; i < TREE_VEC_LENGTH (list2); ++i)
	{
	  tree parm1 = TREE_VEC_ELT (list1, i);
	  tree parm2 = TREE_VEC_ELT (list2, i);
	  if (!template_parameters_equivalent_p (parm1, parm2))
	    return false;
	}

      p1 = TREE_CHAIN (p1);
      p2 = TREE_CHAIN (p2);
    }

  if ((p1 != NULL_TREE) != (p2 != NULL_TREE))
    return false;

  return true;
}

/* Return true if the requires-clauses of the template parameter lists
   are equivalent and false otherwise.
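   For instance (hypothetical, assuming some concept C), the heads of

     template <class T> requires C<T> void f ();
     template <class T> requires C<T> void f ();  // equivalent

   have equivalent requirements, whereas a redeclaration written
   without the requires-clause would not.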
*/ static bool template_requirements_equivalent_p (const_tree parms1, const_tree parms2) { tree req1 = TEMPLATE_PARMS_CONSTRAINTS (parms1); tree req2 = TEMPLATE_PARMS_CONSTRAINTS (parms2); if ((req1 != NULL_TREE) != (req2 != NULL_TREE)) return false; if (!cp_tree_equal (req1, req2)) return false; return true; } /* Returns true if two template heads are equivalent. 17.6.6.1p6: Two template heads are equivalent if their template parameter lists are equivalent and their requires clauses are equivalent. In pre-C++20, this is equivalent to calling comp_template_parms for the template parameters of TMPL1 and TMPL2. */ bool template_heads_equivalent_p (const_tree tmpl1, const_tree tmpl2) { tree parms1 = DECL_TEMPLATE_PARMS (tmpl1); tree parms2 = DECL_TEMPLATE_PARMS (tmpl2); /* Don't change the matching rules for pre-C++20. */ if (cxx_dialect < cxx2a) return comp_template_parms (parms1, parms2); /* ... have the same number of template parameters, and their corresponding parameters are equivalent. */ if (!template_parameter_lists_equivalent_p (parms1, parms2)) return false; /* ... if either has a requires-clause, they both do and their corresponding constraint-expressions are equivalent. */ return template_requirements_equivalent_p (parms1, parms2); } /* Determine whether PARM is a parameter pack. */ bool template_parameter_pack_p (const_tree parm) { /* Determine if we have a non-type template parameter pack. */ if (TREE_CODE (parm) == PARM_DECL) return (DECL_TEMPLATE_PARM_P (parm) && TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm))); if (TREE_CODE (parm) == TEMPLATE_PARM_INDEX) return TEMPLATE_PARM_PARAMETER_PACK (parm); /* If this is a list of template parameters, we could get a TYPE_DECL or a TEMPLATE_DECL. */ if (TREE_CODE (parm) == TYPE_DECL || TREE_CODE (parm) == TEMPLATE_DECL) parm = TREE_TYPE (parm); /* Otherwise it must be a type template parameter. */ return ((TREE_CODE (parm) == TEMPLATE_TYPE_PARM || TREE_CODE (parm) == TEMPLATE_TEMPLATE_PARM) && TEMPLATE_TYPE_PARAMETER_PACK (parm)); } /* Determine if T is a function parameter pack. */ bool function_parameter_pack_p (const_tree t) { if (t && TREE_CODE (t) == PARM_DECL) return DECL_PACK_P (t); return false; } /* Return the function template declaration of PRIMARY_FUNC_TMPL_INST. PRIMARY_FUNC_TMPL_INST is a primary function template instantiation. */ tree get_function_template_decl (const_tree primary_func_tmpl_inst) { if (! primary_func_tmpl_inst || TREE_CODE (primary_func_tmpl_inst) != FUNCTION_DECL || ! primary_template_specialization_p (primary_func_tmpl_inst)) return NULL; return DECL_TEMPLATE_RESULT (DECL_TI_TEMPLATE (primary_func_tmpl_inst)); } /* Return true iff the function parameter PARAM_DECL was expanded from the function parameter pack PACK. */ bool function_parameter_expanded_from_pack_p (tree param_decl, tree pack) { if (DECL_ARTIFICIAL (param_decl) || !function_parameter_pack_p (pack)) return false; /* The parameter pack and its pack arguments have the same DECL_PARM_INDEX. */ return DECL_PARM_INDEX (pack) == DECL_PARM_INDEX (param_decl); } /* Determine whether ARGS describes a variadic template args list, i.e., one that is terminated by a template argument pack. 
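   For example (hypothetical), the innermost argument list of
   std::tuple<int, char, double> ends in an argument pack holding
   {int, char, double}, so it is considered variadic, whereas the
   argument list of std::pair<int, char> is not.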
*/ static bool template_args_variadic_p (tree args) { int nargs; tree last_parm; if (args == NULL_TREE) return false; args = INNERMOST_TEMPLATE_ARGS (args); nargs = TREE_VEC_LENGTH (args); if (nargs == 0) return false; last_parm = TREE_VEC_ELT (args, nargs - 1); return ARGUMENT_PACK_P (last_parm); } /* Generate a new name for the parameter pack name NAME (an IDENTIFIER_NODE) that incorporates its */ static tree make_ith_pack_parameter_name (tree name, int i) { /* Munge the name to include the parameter index. */ #define NUMBUF_LEN 128 char numbuf[NUMBUF_LEN]; char* newname; int newname_len; if (name == NULL_TREE) return name; snprintf (numbuf, NUMBUF_LEN, "%i", i); newname_len = IDENTIFIER_LENGTH (name) + strlen (numbuf) + 2; newname = (char*)alloca (newname_len); snprintf (newname, newname_len, "%s#%i", IDENTIFIER_POINTER (name), i); return get_identifier (newname); } /* Return true if T is a primary function, class or alias template specialization, not including the template pattern. */ bool primary_template_specialization_p (const_tree t) { if (!t) return false; if (TREE_CODE (t) == FUNCTION_DECL || VAR_P (t)) return (DECL_LANG_SPECIFIC (t) && DECL_USE_TEMPLATE (t) && DECL_TEMPLATE_INFO (t) && PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (t))); else if (CLASS_TYPE_P (t) && !TYPE_DECL_ALIAS_P (TYPE_NAME (t))) return (CLASSTYPE_TEMPLATE_INFO (t) && CLASSTYPE_USE_TEMPLATE (t) && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (t))); else if (alias_template_specialization_p (t, nt_transparent)) return true; return false; } /* Return true if PARM is a template template parameter. */ bool template_template_parameter_p (const_tree parm) { return DECL_TEMPLATE_TEMPLATE_PARM_P (parm); } /* Return true iff PARM is a DECL representing a type template parameter. */ bool template_type_parameter_p (const_tree parm) { return (parm && (TREE_CODE (parm) == TYPE_DECL || TREE_CODE (parm) == TEMPLATE_DECL) && DECL_TEMPLATE_PARM_P (parm)); } /* Return the template parameters of T if T is a primary template instantiation, NULL otherwise. */ tree get_primary_template_innermost_parameters (const_tree t) { tree parms = NULL, template_info = NULL; if ((template_info = get_template_info (t)) && primary_template_specialization_p (t)) parms = INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (TI_TEMPLATE (template_info))); return parms; } /* Return the template parameters of the LEVELth level from the full list of template parameters PARMS. */ tree get_template_parms_at_level (tree parms, int level) { tree p; if (!parms || TREE_CODE (parms) != TREE_LIST || level > TMPL_PARMS_DEPTH (parms)) return NULL_TREE; for (p = parms; p; p = TREE_CHAIN (p)) if (TMPL_PARMS_DEPTH (p) == level) return p; return NULL_TREE; } /* Returns the template arguments of T if T is a template instantiation, NULL otherwise. */ tree get_template_innermost_arguments (const_tree t) { tree args = NULL, template_info = NULL; if ((template_info = get_template_info (t)) && TI_ARGS (template_info)) args = INNERMOST_TEMPLATE_ARGS (TI_ARGS (template_info)); return args; } /* Return the argument pack elements of T if T is a template argument pack, NULL otherwise. */ tree get_template_argument_pack_elems (const_tree t) { if (TREE_CODE (t) != TYPE_ARGUMENT_PACK && TREE_CODE (t) != NONTYPE_ARGUMENT_PACK) return NULL; return ARGUMENT_PACK_ARGS (t); } /* In an ARGUMENT_PACK_SELECT, the actual underlying argument that the ARGUMENT_PACK_SELECT represents. 
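   As a rough sketch of the mechanism (hypothetical example): when the
   Ith element of a pack expansion such as the one in

     void f (int, int);
     template <class... Ts> void g (Ts... args) { f ((args + 1)...); }

   is instantiated, each pack in the pattern is temporarily bound to an
   ARGUMENT_PACK_SELECT recording the pack and the index I; this
   function retrieves the Ith stored argument from that pack.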
*/ static tree argument_pack_select_arg (tree t) { tree args = ARGUMENT_PACK_ARGS (ARGUMENT_PACK_SELECT_FROM_PACK (t)); tree arg = TREE_VEC_ELT (args, ARGUMENT_PACK_SELECT_INDEX (t)); /* If the selected argument is an expansion E, that most likely means we were called from gen_elem_of_pack_expansion_instantiation during the substituting of an argument pack (of which the Ith element is a pack expansion, where I is ARGUMENT_PACK_SELECT_INDEX) into a pack expansion. In this case, the Ith element resulting from this substituting is going to be a pack expansion, which pattern is the pattern of E. Let's return the pattern of E, and gen_elem_of_pack_expansion_instantiation will build the resulting pack expansion from it. */ if (PACK_EXPANSION_P (arg)) { /* Make sure we aren't throwing away arg info. */ gcc_assert (!PACK_EXPANSION_EXTRA_ARGS (arg)); arg = PACK_EXPANSION_PATTERN (arg); } return arg; } /* True iff FN is a function representing a built-in variadic parameter pack. */ bool builtin_pack_fn_p (tree fn) { if (!fn || TREE_CODE (fn) != FUNCTION_DECL || !DECL_IS_BUILTIN (fn)) return false; if (id_equal (DECL_NAME (fn), "__integer_pack")) return true; return false; } /* True iff CALL is a call to a function representing a built-in variadic parameter pack. */ static bool builtin_pack_call_p (tree call) { if (TREE_CODE (call) != CALL_EXPR) return false; return builtin_pack_fn_p (CALL_EXPR_FN (call)); } /* Return a TREE_VEC for the expansion of __integer_pack(HI). */ static tree expand_integer_pack (tree call, tree args, tsubst_flags_t complain, tree in_decl) { tree ohi = CALL_EXPR_ARG (call, 0); tree hi = tsubst_copy_and_build (ohi, args, complain, in_decl, false/*fn*/, true/*int_cst*/); if (value_dependent_expression_p (hi)) { if (hi != ohi) { call = copy_node (call); CALL_EXPR_ARG (call, 0) = hi; } tree ex = make_pack_expansion (call, complain); tree vec = make_tree_vec (1); TREE_VEC_ELT (vec, 0) = ex; return vec; } else { hi = cxx_constant_value (hi); int len = valid_constant_size_p (hi) ? tree_to_shwi (hi) : -1; /* Calculate the largest value of len that won't make the size of the vec overflow an int. The compiler will exceed resource limits long before this, but it seems a decent place to diagnose. */ int max = ((INT_MAX - sizeof (tree_vec)) / sizeof (tree)) + 1; if (len < 0 || len > max) { if ((complain & tf_error) && hi != error_mark_node) error ("argument to %<__integer_pack%> must be between 0 and %d", max); return error_mark_node; } tree vec = make_tree_vec (len); for (int i = 0; i < len; ++i) TREE_VEC_ELT (vec, i) = size_int (i); return vec; } } /* Return a TREE_VEC for the expansion of built-in template parameter pack CALL. */ static tree expand_builtin_pack_call (tree call, tree args, tsubst_flags_t complain, tree in_decl) { if (!builtin_pack_call_p (call)) return NULL_TREE; tree fn = CALL_EXPR_FN (call); if (id_equal (DECL_NAME (fn), "__integer_pack")) return expand_integer_pack (call, args, complain, in_decl); return NULL_TREE; } /* Structure used to track the progress of find_parameter_packs_r. */ struct find_parameter_pack_data { /* TREE_LIST that will contain all of the parameter packs found by the traversal. */ tree* parameter_packs; /* Set of AST nodes that have been visited by the traversal. */ hash_set<tree> *visited; /* True iff we're making a type pack expansion. */ bool type_pack_expansion_p; }; /* Identifies all of the argument packs that occur in a template argument and appends them to the TREE_LIST inside DATA, which is a find_parameter_pack_data structure. 
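   For example (hypothetical), walking the pattern "args" of the call
   in

     template <class... Ts> void g (Ts... args) { f (args...); }

   records the single pack "args"; a pattern such as "h<Ts> (args)"
   would record both "Ts" and "args".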
This is a subroutine of make_pack_expansion and uses_parameter_packs. */ static tree find_parameter_packs_r (tree *tp, int *walk_subtrees, void* data) { tree t = *tp; struct find_parameter_pack_data* ppd = (struct find_parameter_pack_data*)data; bool parameter_pack_p = false; /* Don't look through typedefs; we are interested in whether a parameter pack is actually written in the expression/type we're looking at, not the target type. */ if (TYPE_P (t) && typedef_variant_p (t)) { /* But do look at arguments for an alias template. */ if (tree tinfo = TYPE_ALIAS_TEMPLATE_INFO (t)) cp_walk_tree (&TI_ARGS (tinfo), &find_parameter_packs_r, ppd, ppd->visited); *walk_subtrees = 0; return NULL_TREE; } /* Identify whether this is a parameter pack or not. */ switch (TREE_CODE (t)) { case TEMPLATE_PARM_INDEX: if (TEMPLATE_PARM_PARAMETER_PACK (t)) parameter_pack_p = true; break; case TEMPLATE_TYPE_PARM: t = TYPE_MAIN_VARIANT (t); /* FALLTHRU */ case TEMPLATE_TEMPLATE_PARM: /* If the placeholder appears in the decl-specifier-seq of a function parameter pack (14.6.3), or the type-specifier-seq of a type-id that is a pack expansion, the invented template parameter is a template parameter pack. */ if (ppd->type_pack_expansion_p && is_auto (t)) TEMPLATE_TYPE_PARAMETER_PACK (t) = true; if (TEMPLATE_TYPE_PARAMETER_PACK (t)) parameter_pack_p = true; break; case FIELD_DECL: case PARM_DECL: if (DECL_PACK_P (t)) { /* We don't want to walk into the type of a PARM_DECL, because we don't want to see the type parameter pack. */ *walk_subtrees = 0; parameter_pack_p = true; } break; case VAR_DECL: if (DECL_PACK_P (t)) { /* We don't want to walk into the type of a variadic capture proxy, because we don't want to see the type parameter pack. */ *walk_subtrees = 0; parameter_pack_p = true; } else if (variable_template_specialization_p (t)) { cp_walk_tree (&DECL_TI_ARGS (t), find_parameter_packs_r, ppd, ppd->visited); *walk_subtrees = 0; } break; case CALL_EXPR: if (builtin_pack_call_p (t)) parameter_pack_p = true; break; case BASES: parameter_pack_p = true; break; default: /* Not a parameter pack. */ break; } if (parameter_pack_p) { /* Add this parameter pack to the list. */ *ppd->parameter_packs = tree_cons (NULL_TREE, t, *ppd->parameter_packs); } if (TYPE_P (t)) cp_walk_tree (&TYPE_CONTEXT (t), &find_parameter_packs_r, ppd, ppd->visited); /* This switch statement will return immediately if we don't find a parameter pack. ??? Should some of these be in cp_walk_subtrees? */ switch (TREE_CODE (t)) { case BOUND_TEMPLATE_TEMPLATE_PARM: /* Check the template itself. */ cp_walk_tree (&TREE_TYPE (TYPE_TI_TEMPLATE (t)), &find_parameter_packs_r, ppd, ppd->visited); return NULL_TREE; case DECL_EXPR: { tree decl = DECL_EXPR_DECL (t); /* Ignore the declaration of a capture proxy for a parameter pack. */ if (is_capture_proxy (decl)) *walk_subtrees = 0; if (is_typedef_decl (decl)) /* Since we stop at typedefs above, we need to look through them at the point of the DECL_EXPR. 
*/ cp_walk_tree (&DECL_ORIGINAL_TYPE (decl), &find_parameter_packs_r, ppd, ppd->visited); return NULL_TREE; } case TEMPLATE_DECL: if (!DECL_TEMPLATE_TEMPLATE_PARM_P (t)) return NULL_TREE; cp_walk_tree (&TREE_TYPE (t), &find_parameter_packs_r, ppd, ppd->visited); return NULL_TREE; case TYPENAME_TYPE: cp_walk_tree (&TYPENAME_TYPE_FULLNAME (t), &find_parameter_packs_r, ppd, ppd->visited); *walk_subtrees = 0; return NULL_TREE; case TYPE_PACK_EXPANSION: case EXPR_PACK_EXPANSION: *walk_subtrees = 0; return NULL_TREE; case INTEGER_TYPE: cp_walk_tree (&TYPE_MAX_VALUE (t), &find_parameter_packs_r, ppd, ppd->visited); *walk_subtrees = 0; return NULL_TREE; case IDENTIFIER_NODE: cp_walk_tree (&TREE_TYPE (t), &find_parameter_packs_r, ppd, ppd->visited); *walk_subtrees = 0; return NULL_TREE; case LAMBDA_EXPR: { /* Look at explicit captures. */ for (tree cap = LAMBDA_EXPR_CAPTURE_LIST (t); cap; cap = TREE_CHAIN (cap)) cp_walk_tree (&TREE_VALUE (cap), &find_parameter_packs_r, ppd, ppd->visited); /* Since we defer implicit capture, look in the parms and body. */ tree fn = lambda_function (t); cp_walk_tree (&TREE_TYPE (fn), &find_parameter_packs_r, ppd, ppd->visited); cp_walk_tree (&DECL_SAVED_TREE (fn), &find_parameter_packs_r, ppd, ppd->visited); *walk_subtrees = 0; return NULL_TREE; } case DECLTYPE_TYPE: { /* When traversing a DECLTYPE_TYPE_EXPR, we need to set type_pack_expansion_p to false so that any placeholders within the expression don't get marked as parameter packs. */ bool type_pack_expansion_p = ppd->type_pack_expansion_p; ppd->type_pack_expansion_p = false; cp_walk_tree (&DECLTYPE_TYPE_EXPR (t), &find_parameter_packs_r, ppd, ppd->visited); ppd->type_pack_expansion_p = type_pack_expansion_p; *walk_subtrees = 0; return NULL_TREE; } case IF_STMT: cp_walk_tree (&IF_COND (t), &find_parameter_packs_r, ppd, ppd->visited); cp_walk_tree (&THEN_CLAUSE (t), &find_parameter_packs_r, ppd, ppd->visited); cp_walk_tree (&ELSE_CLAUSE (t), &find_parameter_packs_r, ppd, ppd->visited); /* Don't walk into IF_STMT_EXTRA_ARGS. */ *walk_subtrees = 0; return NULL_TREE; default: return NULL_TREE; } return NULL_TREE; } /* Determines if the expression or type T uses any parameter packs. */ tree uses_parameter_packs (tree t) { tree parameter_packs = NULL_TREE; struct find_parameter_pack_data ppd; ppd.parameter_packs = &parameter_packs; ppd.visited = new hash_set<tree>; ppd.type_pack_expansion_p = false; cp_walk_tree (&t, &find_parameter_packs_r, &ppd, ppd.visited); delete ppd.visited; return parameter_packs; } /* Turn ARG, which may be an expression, type, or a TREE_LIST representation a base-class initializer into a parameter pack expansion. If all goes well, the resulting node will be an EXPR_PACK_EXPANSION, TYPE_PACK_EXPANSION, or TREE_LIST, respectively. */ tree make_pack_expansion (tree arg, tsubst_flags_t complain) { tree result; tree parameter_packs = NULL_TREE; bool for_types = false; struct find_parameter_pack_data ppd; if (!arg || arg == error_mark_node) return arg; if (TREE_CODE (arg) == TREE_LIST && TREE_PURPOSE (arg)) { /* A TREE_LIST with a non-null TREE_PURPOSE is for a base class initializer. In this case, the TREE_PURPOSE will be a _TYPE node (representing the base class expansion we're initializing) and the TREE_VALUE will be a TREE_LIST containing the initialization arguments. The resulting expansion looks somewhat different from most expansions. Rather than returning just one _EXPANSION, we return a TREE_LIST whose TREE_PURPOSE is a TYPE_PACK_EXPANSION containing the bases that will be initialized. 
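   A hypothetical source example of such a base-class initializer
   expansion:

     template <class... Bases>
     struct D : Bases...
     {
       D (const Bases&... bs) : Bases (bs)... {}
     };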
The TREE_VALUE will be identical to the original TREE_VALUE, which is a list of arguments that will be passed to each base. We do not introduce any new pack expansion nodes into the TREE_VALUE (although it is possible that some already exist), because the TREE_PURPOSE and TREE_VALUE all need to be expanded together with the same _EXPANSION node. Note that the TYPE_PACK_EXPANSION in the resulting TREE_PURPOSE will mention the parameter packs in both the bases and the arguments to the bases. */ tree purpose; tree value; tree parameter_packs = NULL_TREE; /* Determine which parameter packs will be used by the base class expansion. */ ppd.visited = new hash_set<tree>; ppd.parameter_packs = &parameter_packs; ppd.type_pack_expansion_p = false; gcc_assert (TYPE_P (TREE_PURPOSE (arg))); cp_walk_tree (&TREE_PURPOSE (arg), &find_parameter_packs_r, &ppd, ppd.visited); if (parameter_packs == NULL_TREE) { if (complain & tf_error) error ("base initializer expansion %qT contains no parameter packs", arg); delete ppd.visited; return error_mark_node; } if (TREE_VALUE (arg) != void_type_node) { /* Collect the sets of parameter packs used in each of the initialization arguments. */ for (value = TREE_VALUE (arg); value; value = TREE_CHAIN (value)) { /* Determine which parameter packs will be expanded in this argument. */ cp_walk_tree (&TREE_VALUE (value), &find_parameter_packs_r, &ppd, ppd.visited); } } delete ppd.visited; /* Create the pack expansion type for the base type. */ purpose = cxx_make_type (TYPE_PACK_EXPANSION); SET_PACK_EXPANSION_PATTERN (purpose, TREE_PURPOSE (arg)); PACK_EXPANSION_PARAMETER_PACKS (purpose) = parameter_packs; PACK_EXPANSION_LOCAL_P (purpose) = at_function_scope_p (); /* Just use structural equality for these TYPE_PACK_EXPANSIONS; they will rarely be compared to anything. */ SET_TYPE_STRUCTURAL_EQUALITY (purpose); return tree_cons (purpose, TREE_VALUE (arg), NULL_TREE); } if (TYPE_P (arg) || TREE_CODE (arg) == TEMPLATE_DECL) for_types = true; /* Build the PACK_EXPANSION_* node. */ result = for_types ? cxx_make_type (TYPE_PACK_EXPANSION) : make_node (EXPR_PACK_EXPANSION); SET_PACK_EXPANSION_PATTERN (result, arg); if (TREE_CODE (result) == EXPR_PACK_EXPANSION) { /* Propagate type and const-expression information. */ TREE_TYPE (result) = TREE_TYPE (arg); TREE_CONSTANT (result) = TREE_CONSTANT (arg); /* Mark this read now, since the expansion might be length 0. */ mark_exp_read (arg); } else /* Just use structural equality for these TYPE_PACK_EXPANSIONS; they will rarely be compared to anything. */ SET_TYPE_STRUCTURAL_EQUALITY (result); /* Determine which parameter packs will be expanded. */ ppd.parameter_packs = &parameter_packs; ppd.visited = new hash_set<tree>; ppd.type_pack_expansion_p = TYPE_P (arg); cp_walk_tree (&arg, &find_parameter_packs_r, &ppd, ppd.visited); delete ppd.visited; /* Make sure we found some parameter packs. */ if (parameter_packs == NULL_TREE) { if (complain & tf_error) { if (TYPE_P (arg)) error ("expansion pattern %qT contains no parameter packs", arg); else error ("expansion pattern %qE contains no parameter packs", arg); } return error_mark_node; } PACK_EXPANSION_PARAMETER_PACKS (result) = parameter_packs; PACK_EXPANSION_LOCAL_P (result) = at_function_scope_p (); return result; } /* Checks T for any "bare" parameter packs, which have not yet been expanded, and issues an error if any are found. 
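For example (illustrative):

	 template <typename... Ts> void g (Ts...);

	 template <typename... Ts>
	 void f (Ts... args)
	 {
	   g (args);      // error: "args" is a bare pack, not expanded with "..."
	   g (args...);   // OK
	 }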
This operation can only be done on full expressions or types (e.g., an expression statement, "if" condition, etc.), because we could have expressions like: foo(f(g(h(args)))...) where "args" is a parameter pack. check_for_bare_parameter_packs should not be called for the subexpressions args, h(args), g(h(args)), or f(g(h(args))), because we would produce erroneous error messages. Returns TRUE and emits an error if there were bare parameter packs, returns FALSE otherwise. */ bool check_for_bare_parameter_packs (tree t, location_t loc /* = UNKNOWN_LOCATION */) { tree parameter_packs = NULL_TREE; struct find_parameter_pack_data ppd; if (!processing_template_decl || !t || t == error_mark_node) return false; /* A lambda might use a parameter pack from the containing context. */ if (current_class_type && LAMBDA_TYPE_P (current_class_type) && CLASSTYPE_TEMPLATE_INFO (current_class_type)) return false; if (TREE_CODE (t) == TYPE_DECL) t = TREE_TYPE (t); ppd.parameter_packs = &parameter_packs; ppd.visited = new hash_set<tree>; ppd.type_pack_expansion_p = false; cp_walk_tree (&t, &find_parameter_packs_r, &ppd, ppd.visited); delete ppd.visited; if (parameter_packs) { if (loc == UNKNOWN_LOCATION) loc = cp_expr_loc_or_input_loc (t); error_at (loc, "parameter packs not expanded with %<...%>:"); while (parameter_packs) { tree pack = TREE_VALUE (parameter_packs); tree name = NULL_TREE; if (TREE_CODE (pack) == TEMPLATE_TYPE_PARM || TREE_CODE (pack) == TEMPLATE_TEMPLATE_PARM) name = TYPE_NAME (pack); else if (TREE_CODE (pack) == TEMPLATE_PARM_INDEX) name = DECL_NAME (TEMPLATE_PARM_DECL (pack)); else if (TREE_CODE (pack) == CALL_EXPR) name = DECL_NAME (CALL_EXPR_FN (pack)); else name = DECL_NAME (pack); if (name) inform (loc, " %qD", name); else inform (loc, " %s", "<anonymous>"); parameter_packs = TREE_CHAIN (parameter_packs); } return true; } return false; } /* Expand any parameter packs that occur in the template arguments in ARGS. */ tree expand_template_argument_pack (tree args) { if (args == error_mark_node) return error_mark_node; tree result_args = NULL_TREE; int in_arg, out_arg = 0, nargs = args ? TREE_VEC_LENGTH (args) : 0; int num_result_args = -1; int non_default_args_count = -1; /* First, determine if we need to expand anything, and the number of slots we'll need. */ for (in_arg = 0; in_arg < nargs; ++in_arg) { tree arg = TREE_VEC_ELT (args, in_arg); if (arg == NULL_TREE) return args; if (ARGUMENT_PACK_P (arg)) { int num_packed = TREE_VEC_LENGTH (ARGUMENT_PACK_ARGS (arg)); if (num_result_args < 0) num_result_args = in_arg + num_packed; else num_result_args += num_packed; } else { if (num_result_args >= 0) num_result_args++; } } /* If no expansion is necessary, we're done. */ if (num_result_args < 0) return args; /* Expand arguments. 
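For example (illustrative), an argument vector of the form

	 { int, {char, double} }

whose second element is an argument pack is flattened into

	 { int, char, double }.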
*/ result_args = make_tree_vec (num_result_args); if (NON_DEFAULT_TEMPLATE_ARGS_COUNT (args)) non_default_args_count = GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (args); for (in_arg = 0; in_arg < nargs; ++in_arg) { tree arg = TREE_VEC_ELT (args, in_arg); if (ARGUMENT_PACK_P (arg)) { tree packed = ARGUMENT_PACK_ARGS (arg); int i, num_packed = TREE_VEC_LENGTH (packed); for (i = 0; i < num_packed; ++i, ++out_arg) TREE_VEC_ELT (result_args, out_arg) = TREE_VEC_ELT(packed, i); if (non_default_args_count > 0) non_default_args_count += num_packed - 1; } else { TREE_VEC_ELT (result_args, out_arg) = arg; ++out_arg; } } if (non_default_args_count >= 0) SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (result_args, non_default_args_count); return result_args; } /* Checks if DECL shadows a template parameter. [temp.local]: A template-parameter shall not be redeclared within its scope (including nested scopes). Emits an error and returns TRUE if the DECL shadows a parameter, returns FALSE otherwise. */ bool check_template_shadow (tree decl) { tree olddecl; /* If we're not in a template, we can't possibly shadow a template parameter. */ if (!current_template_parms) return true; /* Figure out what we're shadowing. */ decl = OVL_FIRST (decl); olddecl = innermost_non_namespace_value (DECL_NAME (decl)); /* If there's no previous binding for this name, we're not shadowing anything, let alone a template parameter. */ if (!olddecl) return true; /* If we're not shadowing a template parameter, we're done. Note that OLDDECL might be an OVERLOAD (or perhaps even an ERROR_MARK), so we can't just blithely assume it to be a _DECL node. */ if (!DECL_P (olddecl) || !DECL_TEMPLATE_PARM_P (olddecl)) return true; /* We check for decl != olddecl to avoid bogus errors for using a name inside a class. We check TPFI to avoid duplicate errors for inline member templates. */ if (decl == olddecl || (DECL_TEMPLATE_PARM_P (decl) && TEMPLATE_PARMS_FOR_INLINE (current_template_parms))) return true; /* Don't complain about the injected class name, as we've already complained about the class itself. */ if (DECL_SELF_REFERENCE_P (decl)) return false; if (DECL_TEMPLATE_PARM_P (decl)) error ("declaration of template parameter %q+D shadows " "template parameter", decl); else error ("declaration of %q+#D shadows template parameter", decl); inform (DECL_SOURCE_LOCATION (olddecl), "template parameter %qD declared here", olddecl); return false; } /* Return a new TEMPLATE_PARM_INDEX with the indicated INDEX, LEVEL, ORIG_LEVEL, DECL, and TYPE. */ static tree build_template_parm_index (int index, int level, int orig_level, tree decl, tree type) { tree t = make_node (TEMPLATE_PARM_INDEX); TEMPLATE_PARM_IDX (t) = index; TEMPLATE_PARM_LEVEL (t) = level; TEMPLATE_PARM_ORIG_LEVEL (t) = orig_level; TEMPLATE_PARM_DECL (t) = decl; TREE_TYPE (t) = type; TREE_CONSTANT (t) = TREE_CONSTANT (decl); TREE_READONLY (t) = TREE_READONLY (decl); return t; } /* Find the canonical type parameter for the given template type parameter. Returns the canonical type parameter, which may be TYPE if no such parameter existed. 
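For example (illustrative), the parameters T and U in

	 template <class T> void f (T);
	 template <class U> void g (U);

have the same index and level, so they share a single canonical type.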
*/ static tree canonical_type_parameter (tree type) { tree list; int idx = TEMPLATE_TYPE_IDX (type); gcc_assert (TREE_CODE (type) != TEMPLATE_TEMPLATE_PARM); if (!canonical_template_parms) vec_alloc (canonical_template_parms, idx + 1); if (canonical_template_parms->length () <= (unsigned) idx) vec_safe_grow_cleared (canonical_template_parms, idx + 1); list = (*canonical_template_parms)[idx]; while (list && !comptypes (type, TREE_VALUE (list), COMPARE_STRUCTURAL)) list = TREE_CHAIN (list); if (list) return TREE_VALUE (list); else { (*canonical_template_parms)[idx] = tree_cons (NULL_TREE, type, (*canonical_template_parms)[idx]); return type; } } /* Return a TEMPLATE_PARM_INDEX, similar to INDEX, but whose TEMPLATE_PARM_LEVEL has been decreased by LEVELS. If such a TEMPLATE_PARM_INDEX already exists, it is returned; otherwise, a new one is created. */ static tree reduce_template_parm_level (tree index, tree type, int levels, tree args, tsubst_flags_t complain) { if (TEMPLATE_PARM_DESCENDANTS (index) == NULL_TREE || (TEMPLATE_PARM_LEVEL (TEMPLATE_PARM_DESCENDANTS (index)) != TEMPLATE_PARM_LEVEL (index) - levels) || !same_type_p (type, TREE_TYPE (TEMPLATE_PARM_DESCENDANTS (index)))) { tree orig_decl = TEMPLATE_PARM_DECL (index); tree decl = build_decl (DECL_SOURCE_LOCATION (orig_decl), TREE_CODE (orig_decl), DECL_NAME (orig_decl), type); TREE_CONSTANT (decl) = TREE_CONSTANT (orig_decl); TREE_READONLY (decl) = TREE_READONLY (orig_decl); DECL_VIRTUAL_P (decl) = DECL_VIRTUAL_P (orig_decl); DECL_ARTIFICIAL (decl) = 1; SET_DECL_TEMPLATE_PARM_P (decl); tree tpi = build_template_parm_index (TEMPLATE_PARM_IDX (index), TEMPLATE_PARM_LEVEL (index) - levels, TEMPLATE_PARM_ORIG_LEVEL (index), decl, type); TEMPLATE_PARM_DESCENDANTS (index) = tpi; TEMPLATE_PARM_PARAMETER_PACK (tpi) = TEMPLATE_PARM_PARAMETER_PACK (index); /* Template template parameters need this. */ tree inner = decl; if (TREE_CODE (decl) == TEMPLATE_DECL) { inner = build_decl (DECL_SOURCE_LOCATION (decl), TYPE_DECL, DECL_NAME (decl), type); DECL_TEMPLATE_RESULT (decl) = inner; DECL_ARTIFICIAL (inner) = true; DECL_TEMPLATE_PARMS (decl) = tsubst_template_parms (DECL_TEMPLATE_PARMS (orig_decl), args, complain); } /* Attach the TPI to the decl. */ if (TREE_CODE (inner) == TYPE_DECL) TEMPLATE_TYPE_PARM_INDEX (type) = tpi; else DECL_INITIAL (decl) = tpi; } return TEMPLATE_PARM_DESCENDANTS (index); } /* Process information from new template parameter PARM and append it to the LIST being built. This new parameter is a non-type parameter iff IS_NON_TYPE is true. This new parameter is a parameter pack iff IS_PARAMETER_PACK is true. The location of PARM is in PARM_LOC. */ tree process_template_parm (tree list, location_t parm_loc, tree parm, bool is_non_type, bool is_parameter_pack) { tree decl = 0; int idx = 0; gcc_assert (TREE_CODE (parm) == TREE_LIST); tree defval = TREE_PURPOSE (parm); tree constr = TREE_TYPE (parm); if (list) { tree p = tree_last (list); if (p && TREE_VALUE (p) != error_mark_node) { p = TREE_VALUE (p); if (TREE_CODE (p) == TYPE_DECL || TREE_CODE (p) == TEMPLATE_DECL) idx = TEMPLATE_TYPE_IDX (TREE_TYPE (p)); else idx = TEMPLATE_PARM_IDX (DECL_INITIAL (p)); } ++idx; } if (is_non_type) { parm = TREE_VALUE (parm); SET_DECL_TEMPLATE_PARM_P (parm); if (TREE_TYPE (parm) != error_mark_node) { /* [temp.param] The top-level cv-qualifiers on the template-parameter are ignored when determining its type. 
*/ TREE_TYPE (parm) = TYPE_MAIN_VARIANT (TREE_TYPE (parm)); if (invalid_nontype_parm_type_p (TREE_TYPE (parm), 1)) TREE_TYPE (parm) = error_mark_node; else if (uses_parameter_packs (TREE_TYPE (parm)) && !is_parameter_pack /* If we're in a nested template parameter list, the template template parameter could be a parameter pack. */ && processing_template_parmlist == 1) { /* This template parameter is not a parameter pack, but it should be. Complain about "bare" parameter packs. */ check_for_bare_parameter_packs (TREE_TYPE (parm)); /* Recover by calling this a parameter pack. */ is_parameter_pack = true; } } /* A template parameter is not modifiable. */ TREE_CONSTANT (parm) = 1; TREE_READONLY (parm) = 1; decl = build_decl (parm_loc, CONST_DECL, DECL_NAME (parm), TREE_TYPE (parm)); TREE_CONSTANT (decl) = 1; TREE_READONLY (decl) = 1; DECL_INITIAL (parm) = DECL_INITIAL (decl) = build_template_parm_index (idx, processing_template_decl, processing_template_decl, decl, TREE_TYPE (parm)); TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm)) = is_parameter_pack; } else { tree t; parm = TREE_VALUE (TREE_VALUE (parm)); if (parm && TREE_CODE (parm) == TEMPLATE_DECL) { t = cxx_make_type (TEMPLATE_TEMPLATE_PARM); /* This is for distinguishing between real templates and template template parameters */ TREE_TYPE (parm) = t; /* any_template_parm_r expects to be able to get the targs of a DECL_TEMPLATE_RESULT. */ tree result = DECL_TEMPLATE_RESULT (parm); TREE_TYPE (result) = t; tree args = template_parms_to_args (DECL_TEMPLATE_PARMS (parm)); tree tinfo = build_template_info (parm, args); retrofit_lang_decl (result); DECL_TEMPLATE_INFO (result) = tinfo; decl = parm; } else { t = cxx_make_type (TEMPLATE_TYPE_PARM); /* parm is either IDENTIFIER_NODE or NULL_TREE. */ decl = build_decl (parm_loc, TYPE_DECL, parm, t); } TYPE_NAME (t) = decl; TYPE_STUB_DECL (t) = decl; parm = decl; TEMPLATE_TYPE_PARM_INDEX (t) = build_template_parm_index (idx, processing_template_decl, processing_template_decl, decl, TREE_TYPE (parm)); TEMPLATE_TYPE_PARAMETER_PACK (t) = is_parameter_pack; if (TREE_CODE (t) == TEMPLATE_TEMPLATE_PARM) SET_TYPE_STRUCTURAL_EQUALITY (t); else TYPE_CANONICAL (t) = canonical_type_parameter (t); } DECL_ARTIFICIAL (decl) = 1; SET_DECL_TEMPLATE_PARM_P (decl); /* Build requirements for the type/template parameter. This must be done after SET_DECL_TEMPLATE_PARM_P or process_template_parm could fail. */ tree reqs = finish_shorthand_constraint (parm, constr); decl = pushdecl (decl); if (!is_non_type) parm = decl; /* Build the parameter node linking the parameter declaration, its default argument (if any), and its constraints (if any). */ parm = build_tree_list (defval, parm); TEMPLATE_PARM_CONSTRAINTS (parm) = reqs; return chainon (list, parm); } /* The end of a template parameter list has been reached. Process the tree list into a parameter vector, converting each parameter into a more useful form. Type parameters are saved as IDENTIFIER_NODEs, and others as PARM_DECLs. */ tree end_template_parm_list (tree parms) { int nparms; tree parm, next; tree saved_parmlist = make_tree_vec (list_length (parms)); /* Pop the dummy parameter level and add the real one. 
*/ current_template_parms = TREE_CHAIN (current_template_parms); current_template_parms = tree_cons (size_int (processing_template_decl), saved_parmlist, current_template_parms); for (parm = parms, nparms = 0; parm; parm = next, nparms++) { next = TREE_CHAIN (parm); TREE_VEC_ELT (saved_parmlist, nparms) = parm; TREE_CHAIN (parm) = NULL_TREE; } --processing_template_parmlist; return saved_parmlist; } // Explicitly indicate the end of the template parameter list. We assume // that the current template parameters have been constructed and/or // managed explicitly, as when creating new template template parameters // from a shorthand constraint. void end_template_parm_list () { --processing_template_parmlist; } /* end_template_decl is called after a template declaration is seen. */ void end_template_decl (void) { reset_specialization (); if (! processing_template_decl) return; /* This matches the pushlevel in begin_template_parm_list. */ finish_scope (); --processing_template_decl; current_template_parms = TREE_CHAIN (current_template_parms); } /* Takes a TEMPLATE_PARM_P or DECL_TEMPLATE_PARM_P node or a TREE_LIST thereof, and converts it into an argument suitable to be passed to the type substitution functions. Note that if the TREE_LIST contains an error_mark node, the returned argument is error_mark_node. */ tree template_parm_to_arg (tree t) { if (!t) return NULL_TREE; if (TREE_CODE (t) == TREE_LIST) t = TREE_VALUE (t); if (error_operand_p (t)) return error_mark_node; if (DECL_P (t) && DECL_TEMPLATE_PARM_P (t)) { if (TREE_CODE (t) == TYPE_DECL || TREE_CODE (t) == TEMPLATE_DECL) t = TREE_TYPE (t); else t = DECL_INITIAL (t); } gcc_assert (TEMPLATE_PARM_P (t)); if (TREE_CODE (t) == TEMPLATE_TYPE_PARM || TREE_CODE (t) == TEMPLATE_TEMPLATE_PARM) { if (TEMPLATE_TYPE_PARAMETER_PACK (t)) { /* Turn this argument into a TYPE_ARGUMENT_PACK with a single element, which expands T. */ tree vec = make_tree_vec (1); if (CHECKING_P) SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (vec, TREE_VEC_LENGTH (vec)); TREE_VEC_ELT (vec, 0) = make_pack_expansion (t); t = cxx_make_type (TYPE_ARGUMENT_PACK); SET_ARGUMENT_PACK_ARGS (t, vec); } } else { if (TEMPLATE_PARM_PARAMETER_PACK (t)) { /* Turn this argument into a NONTYPE_ARGUMENT_PACK with a single element, which expands T. */ tree vec = make_tree_vec (1); if (CHECKING_P) SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (vec, TREE_VEC_LENGTH (vec)); t = convert_from_reference (t); TREE_VEC_ELT (vec, 0) = make_pack_expansion (t); t = make_node (NONTYPE_ARGUMENT_PACK); SET_ARGUMENT_PACK_ARGS (t, vec); } else t = convert_from_reference (t); } return t; } /* Given a single level of template parameters (a TREE_VEC), return it as a set of template arguments. */ tree template_parms_level_to_args (tree parms) { tree a = copy_node (parms); TREE_TYPE (a) = NULL_TREE; for (int i = TREE_VEC_LENGTH (a) - 1; i >= 0; --i) TREE_VEC_ELT (a, i) = template_parm_to_arg (TREE_VEC_ELT (a, i)); if (CHECKING_P) SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (a, TREE_VEC_LENGTH (a)); return a; } /* Given a set of template parameters, return them as a set of template arguments. The template parameters are represented as a TREE_VEC, in the form documented in cp-tree.h for template arguments. */ tree template_parms_to_args (tree parms) { tree header; tree args = NULL_TREE; int length = TMPL_PARMS_DEPTH (parms); int l = length; /* If there is only one level of template parameters, we do not create a TREE_VEC of TREE_VECs. Instead, we return a single TREE_VEC containing the arguments. 
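For example (illustrative), for

	 template <class T, class... Rest> struct S;

the single level yields the vector { T, Rest }, where Rest is wrapped in a
TYPE_ARGUMENT_PACK whose only element is the expansion Rest... .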
*/ if (length > 1) args = make_tree_vec (length); for (header = parms; header; header = TREE_CHAIN (header)) { tree a = template_parms_level_to_args (TREE_VALUE (header)); if (length > 1) TREE_VEC_ELT (args, --l) = a; else args = a; } return args; } /* Within the declaration of a template, return the currently active template parameters as an argument TREE_VEC. */ static tree current_template_args (void) { return template_parms_to_args (current_template_parms); } /* Return the fully generic arguments for of TMPL, i.e. what current_template_args would be while parsing it. */ tree generic_targs_for (tree tmpl) { if (tmpl == NULL_TREE) return NULL_TREE; if (DECL_TEMPLATE_TEMPLATE_PARM_P (tmpl) || DECL_TEMPLATE_SPECIALIZATION (tmpl)) /* DECL_TEMPLATE_RESULT doesn't have the arguments we want. For a template template parameter, it has no TEMPLATE_INFO; for a partial specialization, it has the arguments for the primary template, and we want the arguments for the partial specialization. */; else if (tree result = DECL_TEMPLATE_RESULT (tmpl)) if (tree ti = get_template_info (result)) return TI_ARGS (ti); return template_parms_to_args (DECL_TEMPLATE_PARMS (tmpl)); } /* Update the declared TYPE by doing any lookups which were thought to be dependent, but are not now that we know the SCOPE of the declarator. */ tree maybe_update_decl_type (tree orig_type, tree scope) { tree type = orig_type; if (type == NULL_TREE) return type; if (TREE_CODE (orig_type) == TYPE_DECL) type = TREE_TYPE (type); if (scope && TYPE_P (scope) && dependent_type_p (scope) && dependent_type_p (type) /* Don't bother building up the args in this case. */ && TREE_CODE (type) != TEMPLATE_TYPE_PARM) { /* tsubst in the args corresponding to the template parameters, including auto if present. Most things will be unchanged, but make_typename_type and tsubst_qualified_id will resolve TYPENAME_TYPEs and SCOPE_REFs that were previously dependent. */ tree args = current_template_args (); tree auto_node = type_uses_auto (type); tree pushed; if (auto_node) { tree auto_vec = make_tree_vec (1); TREE_VEC_ELT (auto_vec, 0) = auto_node; args = add_to_template_args (args, auto_vec); } pushed = push_scope (scope); type = tsubst (type, args, tf_warning_or_error, NULL_TREE); if (pushed) pop_scope (scope); } if (type == error_mark_node) return orig_type; if (TREE_CODE (orig_type) == TYPE_DECL) { if (same_type_p (type, TREE_TYPE (orig_type))) type = orig_type; else type = TYPE_NAME (type); } return type; } /* Return a TEMPLATE_DECL corresponding to DECL, using the indicated template PARMS and constraints, CONSTR. If MEMBER_TEMPLATE_P is true, the new template is a member template. */ static tree build_template_decl (tree decl, tree parms, bool member_template_p) { tree tmpl = build_lang_decl (TEMPLATE_DECL, DECL_NAME (decl), NULL_TREE); SET_DECL_LANGUAGE (tmpl, DECL_LANGUAGE (decl)); DECL_TEMPLATE_PARMS (tmpl) = parms; DECL_CONTEXT (tmpl) = DECL_CONTEXT (decl); DECL_SOURCE_LOCATION (tmpl) = DECL_SOURCE_LOCATION (decl); DECL_MEMBER_TEMPLATE_P (tmpl) = member_template_p; return tmpl; } struct template_parm_data { /* The level of the template parameters we are currently processing. */ int level; /* The index of the specialization argument we are currently processing. */ int current_arg; /* An array whose size is the number of template parameters. The elements are nonzero if the parameter has been used in any one of the arguments processed so far. */ int* parms; /* An array whose size is the number of template arguments. 
The elements are nonzero if the argument makes use of template parameters of this level. */ int* arg_uses_template_parms; }; /* Subroutine of push_template_decl used to see if each template parameter in a partial specialization is used in the explicit argument list. If T is of the LEVEL given in DATA (which is treated as a template_parm_data*), then DATA->PARMS is marked appropriately. */ static int mark_template_parm (tree t, void* data) { int level; int idx; struct template_parm_data* tpd = (struct template_parm_data*) data; template_parm_level_and_index (t, &level, &idx); if (level == tpd->level) { tpd->parms[idx] = 1; tpd->arg_uses_template_parms[tpd->current_arg] = 1; } /* In C++17 the type of a non-type argument is a deduced context. */ if (cxx_dialect >= cxx17 && TREE_CODE (t) == TEMPLATE_PARM_INDEX) for_each_template_parm (TREE_TYPE (t), &mark_template_parm, data, NULL, /*include_nondeduced_p=*/false); /* Return zero so that for_each_template_parm will continue the traversal of the tree; we want to mark *every* template parm. */ return 0; } /* Process the partial specialization DECL. */ static tree process_partial_specialization (tree decl) { tree type = TREE_TYPE (decl); tree tinfo = get_template_info (decl); tree maintmpl = TI_TEMPLATE (tinfo); tree specargs = TI_ARGS (tinfo); tree inner_args = INNERMOST_TEMPLATE_ARGS (specargs); tree main_inner_parms = DECL_INNERMOST_TEMPLATE_PARMS (maintmpl); tree inner_parms; tree inst; int nargs = TREE_VEC_LENGTH (inner_args); int ntparms; int i; bool did_error_intro = false; struct template_parm_data tpd; struct template_parm_data tpd2; gcc_assert (current_template_parms); /* A concept cannot be specialized. */ if (flag_concepts && variable_concept_p (maintmpl)) { error ("specialization of variable concept %q#D", maintmpl); return error_mark_node; } inner_parms = INNERMOST_TEMPLATE_PARMS (current_template_parms); ntparms = TREE_VEC_LENGTH (inner_parms); /* We check that each of the template parameters given in the partial specialization is used in the argument list to the specialization. For example: template <class T> struct S; template <class T> struct S<T*>; The second declaration is OK because `T*' uses the template parameter T, whereas template <class T> struct S<int>; is no good. Even trickier is: template <class T> struct S1 { template <class U> struct S2; template <class U> struct S2<T>; }; The S2<T> declaration is actually invalid; it is a full-specialization. Of course, template <class U> struct S2<T (*)(U)>; or some such would have been OK. */ tpd.level = TMPL_PARMS_DEPTH (current_template_parms); tpd.parms = XALLOCAVEC (int, ntparms); memset (tpd.parms, 0, sizeof (int) * ntparms); tpd.arg_uses_template_parms = XALLOCAVEC (int, nargs); memset (tpd.arg_uses_template_parms, 0, sizeof (int) * nargs); for (i = 0; i < nargs; ++i) { tpd.current_arg = i; for_each_template_parm (TREE_VEC_ELT (inner_args, i), &mark_template_parm, &tpd, NULL, /*include_nondeduced_p=*/false); } for (i = 0; i < ntparms; ++i) if (tpd.parms[i] == 0) { /* One of the template parms was not used in a deduced context in the specialization. */ if (!did_error_intro) { error ("template parameters not deducible in " "partial specialization:"); did_error_intro = true; } inform (input_location, " %qD", TREE_VALUE (TREE_VEC_ELT (inner_parms, i))); } if (did_error_intro) return error_mark_node; /* [temp.class.spec] The argument list of the specialization shall not be identical to the implicit argument list of the primary template. 
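For example (illustrative):

	 template <class T> struct S;
	 template <class T> struct S<T>;    // rejected: same as the implicit list <T>
	 template <class T> struct S<T*>;   // OK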
*/ tree main_args = TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (maintmpl))); if (comp_template_args (inner_args, INNERMOST_TEMPLATE_ARGS (main_args)) && (!flag_concepts || !strictly_subsumes (current_template_constraints (), main_args, maintmpl))) { if (!flag_concepts) error ("partial specialization %q+D does not specialize " "any template arguments; to define the primary template, " "remove the template argument list", decl); else error ("partial specialization %q+D does not specialize any " "template arguments and is not more constrained than " "the primary template; to define the primary template, " "remove the template argument list", decl); inform (DECL_SOURCE_LOCATION (maintmpl), "primary template here"); } /* A partial specialization that replaces multiple parameters of the primary template with a pack expansion is less specialized for those parameters. */ if (nargs < DECL_NTPARMS (maintmpl)) { error ("partial specialization is not more specialized than the " "primary template because it replaces multiple parameters " "with a pack expansion"); inform (DECL_SOURCE_LOCATION (maintmpl), "primary template here"); /* Avoid crash in process_partial_specialization. */ return decl; } else if (nargs > DECL_NTPARMS (maintmpl)) { error ("too many arguments for partial specialization %qT", type); inform (DECL_SOURCE_LOCATION (maintmpl), "primary template here"); /* Avoid crash below. */ return decl; } /* If we aren't in a dependent class, we can actually try deduction. */ else if (tpd.level == 1 /* FIXME we should be able to handle a partial specialization of a partial instantiation, but currently we can't (c++/41727). */ && TMPL_ARGS_DEPTH (specargs) == 1 && !get_partial_spec_bindings (maintmpl, maintmpl, specargs)) { auto_diagnostic_group d; if (permerror (input_location, "partial specialization %qD is not " "more specialized than", decl)) inform (DECL_SOURCE_LOCATION (maintmpl), "primary template %qD", maintmpl); } /* [temp.class.spec] A partially specialized non-type argument expression shall not involve template parameters of the partial specialization except when the argument expression is a simple identifier. The type of a template parameter corresponding to a specialized non-type argument shall not be dependent on a parameter of the specialization. Also, we verify that pack expansions only occur at the end of the argument list. */ tpd2.parms = 0; for (i = 0; i < nargs; ++i) { tree parm = TREE_VALUE (TREE_VEC_ELT (main_inner_parms, i)); tree arg = TREE_VEC_ELT (inner_args, i); tree packed_args = NULL_TREE; int j, len = 1; if (ARGUMENT_PACK_P (arg)) { /* Extract the arguments from the argument pack. We'll be iterating over these in the following loop. */ packed_args = ARGUMENT_PACK_ARGS (arg); len = TREE_VEC_LENGTH (packed_args); } for (j = 0; j < len; j++) { if (packed_args) /* Get the Jth argument in the parameter pack. */ arg = TREE_VEC_ELT (packed_args, j); if (PACK_EXPANSION_P (arg)) { /* Pack expansions must come at the end of the argument list. */ if ((packed_args && j < len - 1) || (!packed_args && i < nargs - 1)) { if (TREE_CODE (arg) == EXPR_PACK_EXPANSION) error ("parameter pack argument %qE must be at the " "end of the template argument list", arg); else error ("parameter pack argument %qT must be at the " "end of the template argument list", arg); } } if (TREE_CODE (arg) == EXPR_PACK_EXPANSION) /* We only care about the pattern. */ arg = PACK_EXPANSION_PATTERN (arg); if (/* These first two lines are the `non-type' bit. 
*/ !TYPE_P (arg) && TREE_CODE (arg) != TEMPLATE_DECL /* This next two lines are the `argument expression is not just a simple identifier' condition and also the `specialized non-type argument' bit. */ && TREE_CODE (arg) != TEMPLATE_PARM_INDEX && !((REFERENCE_REF_P (arg) || TREE_CODE (arg) == VIEW_CONVERT_EXPR) && TREE_CODE (TREE_OPERAND (arg, 0)) == TEMPLATE_PARM_INDEX)) { if ((!packed_args && tpd.arg_uses_template_parms[i]) || (packed_args && uses_template_parms (arg))) error_at (cp_expr_loc_or_input_loc (arg), "template argument %qE involves template " "parameter(s)", arg); else { /* Look at the corresponding template parameter, marking which template parameters its type depends upon. */ tree type = TREE_TYPE (parm); if (!tpd2.parms) { /* We haven't yet initialized TPD2. Do so now. */ tpd2.arg_uses_template_parms = XALLOCAVEC (int, nargs); /* The number of parameters here is the number in the main template, which, as checked in the assertion above, is NARGS. */ tpd2.parms = XALLOCAVEC (int, nargs); tpd2.level = TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (maintmpl)); } /* Mark the template parameters. But this time, we're looking for the template parameters of the main template, not in the specialization. */ tpd2.current_arg = i; tpd2.arg_uses_template_parms[i] = 0; memset (tpd2.parms, 0, sizeof (int) * nargs); for_each_template_parm (type, &mark_template_parm, &tpd2, NULL, /*include_nondeduced_p=*/false); if (tpd2.arg_uses_template_parms [i]) { /* The type depended on some template parameters. If they are fully specialized in the specialization, that's OK. */ int j; int count = 0; for (j = 0; j < nargs; ++j) if (tpd2.parms[j] != 0 && tpd.arg_uses_template_parms [j]) ++count; if (count != 0) error_n (input_location, count, "type %qT of template argument %qE depends " "on a template parameter", "type %qT of template argument %qE depends " "on template parameters", type, arg); } } } } } /* We should only get here once. */ if (TREE_CODE (decl) == TYPE_DECL) gcc_assert (!COMPLETE_TYPE_P (type)); // Build the template decl. tree tmpl = build_template_decl (decl, current_template_parms, DECL_MEMBER_TEMPLATE_P (maintmpl)); TREE_TYPE (tmpl) = type; DECL_TEMPLATE_RESULT (tmpl) = decl; SET_DECL_TEMPLATE_SPECIALIZATION (tmpl); DECL_TEMPLATE_INFO (tmpl) = build_template_info (maintmpl, specargs); DECL_PRIMARY_TEMPLATE (tmpl) = maintmpl; /* Give template template parms a DECL_CONTEXT of the template for which they are a parameter. */ for (i = 0; i < ntparms; ++i) { tree parm = TREE_VALUE (TREE_VEC_ELT (inner_parms, i)); if (TREE_CODE (parm) == TEMPLATE_DECL) DECL_CONTEXT (parm) = tmpl; } if (VAR_P (decl)) /* We didn't register this in check_explicit_specialization so we could wait until the constraints were set. */ decl = register_specialization (decl, maintmpl, specargs, false, 0); else associate_classtype_constraints (type); DECL_TEMPLATE_SPECIALIZATIONS (maintmpl) = tree_cons (specargs, tmpl, DECL_TEMPLATE_SPECIALIZATIONS (maintmpl)); TREE_TYPE (DECL_TEMPLATE_SPECIALIZATIONS (maintmpl)) = type; for (inst = DECL_TEMPLATE_INSTANTIATIONS (maintmpl); inst; inst = TREE_CHAIN (inst)) { tree instance = TREE_VALUE (inst); if (TYPE_P (instance) ? (COMPLETE_TYPE_P (instance) && CLASSTYPE_IMPLICIT_INSTANTIATION (instance)) : DECL_TEMPLATE_INSTANTIATION (instance)) { tree spec = most_specialized_partial_spec (instance, tf_none); tree inst_decl = (DECL_P (instance) ? 
instance : TYPE_NAME (instance)); if (!spec) /* OK */; else if (spec == error_mark_node) permerror (input_location, "declaration of %qD ambiguates earlier template " "instantiation for %qD", decl, inst_decl); else if (TREE_VALUE (spec) == tmpl) permerror (input_location, "partial specialization of %qD after instantiation " "of %qD", decl, inst_decl); } } return decl; } /* PARM is a template parameter of some form; return the corresponding TEMPLATE_PARM_INDEX. */ static tree get_template_parm_index (tree parm) { if (TREE_CODE (parm) == PARM_DECL || TREE_CODE (parm) == CONST_DECL) parm = DECL_INITIAL (parm); else if (TREE_CODE (parm) == TYPE_DECL || TREE_CODE (parm) == TEMPLATE_DECL) parm = TREE_TYPE (parm); if (TREE_CODE (parm) == TEMPLATE_TYPE_PARM || TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM || TREE_CODE (parm) == TEMPLATE_TEMPLATE_PARM) parm = TEMPLATE_TYPE_PARM_INDEX (parm); gcc_assert (TREE_CODE (parm) == TEMPLATE_PARM_INDEX); return parm; } /* Subroutine of fixed_parameter_pack_p below. Look for any template parameter packs used by the template parameter PARM. */ static void fixed_parameter_pack_p_1 (tree parm, struct find_parameter_pack_data *ppd) { /* A type parm can't refer to another parm. */ if (TREE_CODE (parm) == TYPE_DECL || parm == error_mark_node) return; else if (TREE_CODE (parm) == PARM_DECL) { cp_walk_tree (&TREE_TYPE (parm), &find_parameter_packs_r, ppd, ppd->visited); return; } gcc_assert (TREE_CODE (parm) == TEMPLATE_DECL); tree vec = INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (parm)); for (int i = 0; i < TREE_VEC_LENGTH (vec); ++i) { tree p = TREE_VALUE (TREE_VEC_ELT (vec, i)); if (template_parameter_pack_p (p)) /* Any packs in the type are expanded by this parameter. */; else fixed_parameter_pack_p_1 (p, ppd); } } /* PARM is a template parameter pack. Return any parameter packs used in its type or the type of any of its template parameters. If there are any such packs, it will be instantiated into a fixed template parameter list by partial instantiation rather than be fully deduced. */ tree fixed_parameter_pack_p (tree parm) { /* This can only be true in a member template. */ if (TEMPLATE_PARM_ORIG_LEVEL (get_template_parm_index (parm)) < 2) return NULL_TREE; /* This can only be true for a parameter pack. */ if (!template_parameter_pack_p (parm)) return NULL_TREE; /* A type parm can't refer to another parm. */ if (TREE_CODE (parm) == TYPE_DECL) return NULL_TREE; tree parameter_packs = NULL_TREE; struct find_parameter_pack_data ppd; ppd.parameter_packs = &parameter_packs; ppd.visited = new hash_set<tree>; ppd.type_pack_expansion_p = false; fixed_parameter_pack_p_1 (parm, &ppd); delete ppd.visited; return parameter_packs; } /* Check that a template declaration's use of default arguments and parameter packs is not invalid. Here, PARMS are the template parameters. IS_PRIMARY is true if DECL is the thing declared by a primary template. IS_PARTIAL is true if DECL is a partial specialization. IS_FRIEND_DECL is nonzero if DECL is either a non-defining friend function template declaration or a friend class template declaration. In the function case, 1 indicates a declaration, 2 indicates a redeclaration. When IS_FRIEND_DECL=2, no errors are emitted for extraneous default arguments. Returns TRUE if there were no errors found, FALSE otherwise. 
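Among the rules enforced below, for example (illustrative):

	 template <class T = int, class U> struct A;          // error: U lacks a default
	 template <class T = int, class U = char> struct B;   // OK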
*/ bool check_default_tmpl_args (tree decl, tree parms, bool is_primary, bool is_partial, int is_friend_decl) { const char *msg; int last_level_to_check; tree parm_level; bool no_errors = true; /* [temp.param] A default template-argument shall not be specified in a function template declaration or a function template definition, nor in the template-parameter-list of the definition of a member of a class template. */ if (TREE_CODE (CP_DECL_CONTEXT (decl)) == FUNCTION_DECL || (TREE_CODE (decl) == FUNCTION_DECL && DECL_LOCAL_FUNCTION_P (decl))) /* You can't have a function template declaration in a local scope, nor you can you define a member of a class template in a local scope. */ return true; if ((TREE_CODE (decl) == TYPE_DECL && TREE_TYPE (decl) && LAMBDA_TYPE_P (TREE_TYPE (decl))) || (TREE_CODE (decl) == FUNCTION_DECL && LAMBDA_FUNCTION_P (decl))) /* A lambda doesn't have an explicit declaration; don't complain about the parms of the enclosing class. */ return true; if (current_class_type && !TYPE_BEING_DEFINED (current_class_type) && DECL_LANG_SPECIFIC (decl) && DECL_DECLARES_FUNCTION_P (decl) /* If this is either a friend defined in the scope of the class or a member function. */ && (DECL_FUNCTION_MEMBER_P (decl) ? same_type_p (DECL_CONTEXT (decl), current_class_type) : DECL_FRIEND_CONTEXT (decl) ? same_type_p (DECL_FRIEND_CONTEXT (decl), current_class_type) : false) /* And, if it was a member function, it really was defined in the scope of the class. */ && (!DECL_FUNCTION_MEMBER_P (decl) || DECL_INITIALIZED_IN_CLASS_P (decl))) /* We already checked these parameters when the template was declared, so there's no need to do it again now. This function was defined in class scope, but we're processing its body now that the class is complete. */ return true; /* Core issue 226 (C++0x only): the following only applies to class templates. */ if (is_primary && ((cxx_dialect == cxx98) || TREE_CODE (decl) != FUNCTION_DECL)) { /* [temp.param] If a template-parameter has a default template-argument, all subsequent template-parameters shall have a default template-argument supplied. */ for (parm_level = parms; parm_level; parm_level = TREE_CHAIN (parm_level)) { tree inner_parms = TREE_VALUE (parm_level); int ntparms = TREE_VEC_LENGTH (inner_parms); int seen_def_arg_p = 0; int i; for (i = 0; i < ntparms; ++i) { tree parm = TREE_VEC_ELT (inner_parms, i); if (parm == error_mark_node) continue; if (TREE_PURPOSE (parm)) seen_def_arg_p = 1; else if (seen_def_arg_p && !template_parameter_pack_p (TREE_VALUE (parm))) { error ("no default argument for %qD", TREE_VALUE (parm)); /* For better subsequent error-recovery, we indicate that there should have been a default argument. */ TREE_PURPOSE (parm) = error_mark_node; no_errors = false; } else if (!is_partial && !is_friend_decl /* Don't complain about an enclosing partial specialization. */ && parm_level == parms && TREE_CODE (decl) == TYPE_DECL && i < ntparms - 1 && template_parameter_pack_p (TREE_VALUE (parm)) /* A fixed parameter pack will be partially instantiated into a fixed length list. */ && !fixed_parameter_pack_p (TREE_VALUE (parm))) { /* A primary class template can only have one parameter pack, at the end of the template parameter list. 
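For example (illustrative):

	 template <class... Ts, class T> struct A;   // rejected here
	 template <class T, class... Ts> struct B;   // OK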
*/ error ("parameter pack %q+D must be at the end of the" " template parameter list", TREE_VALUE (parm)); TREE_VALUE (TREE_VEC_ELT (inner_parms, i)) = error_mark_node; no_errors = false; } } } } if (((cxx_dialect == cxx98) && TREE_CODE (decl) != TYPE_DECL) || is_partial || !is_primary || is_friend_decl) /* For an ordinary class template, default template arguments are allowed at the innermost level, e.g.: template <class T = int> struct S {}; but, in a partial specialization, they're not allowed even there, as we have in [temp.class.spec]: The template parameter list of a specialization shall not contain default template argument values. So, for a partial specialization, or for a function template (in C++98/C++03), we look at all of them. */ ; else /* But, for a primary class template that is not a partial specialization we look at all template parameters except the innermost ones. */ parms = TREE_CHAIN (parms); /* Figure out what error message to issue. */ if (is_friend_decl == 2) msg = G_("default template arguments may not be used in function template " "friend re-declaration"); else if (is_friend_decl) msg = G_("default template arguments may not be used in template " "friend declarations"); else if (TREE_CODE (decl) == FUNCTION_DECL && (cxx_dialect == cxx98)) msg = G_("default template arguments may not be used in function templates " "without %<-std=c++11%> or %<-std=gnu++11%>"); else if (is_partial) msg = G_("default template arguments may not be used in " "partial specializations"); else if (current_class_type && CLASSTYPE_IS_TEMPLATE (current_class_type)) msg = G_("default argument for template parameter for class enclosing %qD"); else /* Per [temp.param]/9, "A default template-argument shall not be specified in the template-parameter-lists of the definition of a member of a class template that appears outside of the member's class.", thus if we aren't handling a member of a class template there is no need to examine the parameters. */ return true; if (current_class_type && TYPE_BEING_DEFINED (current_class_type)) /* If we're inside a class definition, there's no need to examine the parameters to the class itself. On the one hand, they will be checked when the class is defined, and, on the other, default arguments are valid in things like: template <class T = double> struct S { template <class U> void f(U); }; Here the default argument for `S' has no bearing on the declaration of `f'. */ last_level_to_check = template_class_depth (current_class_type) + 1; else /* Check everything. */ last_level_to_check = 0; for (parm_level = parms; parm_level && TMPL_PARMS_DEPTH (parm_level) >= last_level_to_check; parm_level = TREE_CHAIN (parm_level)) { tree inner_parms = TREE_VALUE (parm_level); int i; int ntparms; ntparms = TREE_VEC_LENGTH (inner_parms); for (i = 0; i < ntparms; ++i) { if (TREE_VEC_ELT (inner_parms, i) == error_mark_node) continue; if (TREE_PURPOSE (TREE_VEC_ELT (inner_parms, i))) { if (msg) { no_errors = false; if (is_friend_decl == 2) return no_errors; error (msg, decl); msg = 0; } /* Clear out the default argument so that we are not confused later. */ TREE_PURPOSE (TREE_VEC_ELT (inner_parms, i)) = NULL_TREE; } } /* At this point, if we're still interested in issuing messages, they must apply to classes surrounding the object declared. */ if (msg) msg = G_("default argument for template parameter for class " "enclosing %qD"); } return no_errors; } /* Worker for push_template_decl_real, called via for_each_template_parm. 
DATA is really an int, indicating the level of the parameters we are interested in. If T is a template parameter of that level, return nonzero. */ static int template_parm_this_level_p (tree t, void* data) { int this_level = *(int *)data; int level; if (TREE_CODE (t) == TEMPLATE_PARM_INDEX) level = TEMPLATE_PARM_LEVEL (t); else level = TEMPLATE_TYPE_LEVEL (t); return level == this_level; } /* Worker for uses_outer_template_parms, called via for_each_template_parm. DATA is really an int, indicating the innermost outer level of parameters. If T is a template parameter of that level or further out, return nonzero. */ static int template_parm_outer_level (tree t, void *data) { int this_level = *(int *)data; int level; if (TREE_CODE (t) == TEMPLATE_PARM_INDEX) level = TEMPLATE_PARM_LEVEL (t); else level = TEMPLATE_TYPE_LEVEL (t); return level <= this_level; } /* Creates a TEMPLATE_DECL for the indicated DECL using the template parameters given by current_template_args, or reuses a previously existing one, if appropriate. Returns the DECL, or an equivalent one, if it is replaced via a call to duplicate_decls. If IS_FRIEND is true, DECL is a friend declaration. */ tree push_template_decl_real (tree decl, bool is_friend) { tree tmpl; tree args; tree info; tree ctx; bool is_primary; bool is_partial; int new_template_p = 0; /* True if the template is a member template, in the sense of [temp.mem]. */ bool member_template_p = false; if (decl == error_mark_node || !current_template_parms) return error_mark_node; /* See if this is a partial specialization. */ is_partial = ((DECL_IMPLICIT_TYPEDEF_P (decl) && TREE_CODE (TREE_TYPE (decl)) != ENUMERAL_TYPE && CLASSTYPE_TEMPLATE_SPECIALIZATION (TREE_TYPE (decl))) || (VAR_P (decl) && DECL_LANG_SPECIFIC (decl) && DECL_TEMPLATE_SPECIALIZATION (decl) && TINFO_USED_TEMPLATE_ID (DECL_TEMPLATE_INFO (decl)))); if (TREE_CODE (decl) == FUNCTION_DECL && DECL_FRIEND_P (decl)) is_friend = true; if (is_friend) /* For a friend, we want the context of the friend, not the type of which it is a friend. */ ctx = CP_DECL_CONTEXT (decl); else if (CP_DECL_CONTEXT (decl) && TREE_CODE (CP_DECL_CONTEXT (decl)) != NAMESPACE_DECL) /* In the case of a virtual function, we want the class in which it is defined. */ ctx = CP_DECL_CONTEXT (decl); else /* Otherwise, if we're currently defining some class, the DECL is assumed to be a member of the class. */ ctx = current_scope (); if (ctx && TREE_CODE (ctx) == NAMESPACE_DECL) ctx = NULL_TREE; if (!DECL_CONTEXT (decl)) DECL_CONTEXT (decl) = FROB_CONTEXT (current_namespace); /* See if this is a primary template. */ if (is_friend && ctx && uses_template_parms_level (ctx, processing_template_decl)) /* A friend template that specifies a class context, i.e. template <typename T> friend void A<T>::f(); is not primary. */ is_primary = false; else if (TREE_CODE (decl) == TYPE_DECL && LAMBDA_TYPE_P (TREE_TYPE (decl))) is_primary = false; else is_primary = template_parm_scope_p (); if (is_primary) { warning (OPT_Wtemplates, "template %qD declared", decl); if (DECL_CLASS_SCOPE_P (decl)) member_template_p = true; if (TREE_CODE (decl) == TYPE_DECL && IDENTIFIER_ANON_P (DECL_NAME (decl))) { error ("template class without a name"); return error_mark_node; } else if (TREE_CODE (decl) == FUNCTION_DECL) { if (member_template_p) { if (DECL_OVERRIDE_P (decl) || DECL_FINAL_P (decl)) error ("member template %qD may not have virt-specifiers", decl); } if (DECL_DESTRUCTOR_P (decl)) { /* [temp.mem] A destructor shall not be a member template. 
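For example (illustrative):

	 struct X
	 {
	   template <class T> ~X ();   // rejected here
	 };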
*/ error_at (DECL_SOURCE_LOCATION (decl), "destructor %qD declared as member template", decl); return error_mark_node; } if (IDENTIFIER_NEWDEL_OP_P (DECL_NAME (decl)) && (!prototype_p (TREE_TYPE (decl)) || TYPE_ARG_TYPES (TREE_TYPE (decl)) == void_list_node || !TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (decl))) || (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (decl))) == void_list_node))) { /* [basic.stc.dynamic.allocation] An allocation function can be a function template. ... Template allocation functions shall have two or more parameters. */ error ("invalid template declaration of %qD", decl); return error_mark_node; } } else if (DECL_IMPLICIT_TYPEDEF_P (decl) && CLASS_TYPE_P (TREE_TYPE (decl))) { /* Class template, set TEMPLATE_TYPE_PARM_FOR_CLASS. */ tree parms = INNERMOST_TEMPLATE_PARMS (current_template_parms); for (int i = 0; i < TREE_VEC_LENGTH (parms); ++i) { tree t = TREE_VALUE (TREE_VEC_ELT (parms, i)); if (TREE_CODE (t) == TYPE_DECL) t = TREE_TYPE (t); if (TREE_CODE (t) == TEMPLATE_TYPE_PARM) TEMPLATE_TYPE_PARM_FOR_CLASS (t) = true; } } else if (TREE_CODE (decl) == TYPE_DECL && TYPE_DECL_ALIAS_P (decl)) /* alias-declaration */ gcc_assert (!DECL_ARTIFICIAL (decl)); else if (VAR_P (decl)) /* C++14 variable template. */; else if (TREE_CODE (decl) == CONCEPT_DECL) /* C++2a concept definitions. */; else { error ("template declaration of %q#D", decl); return error_mark_node; } } /* Check to see that the rules regarding the use of default arguments are not being violated. We check args for a friend functions when we know whether it's a definition, introducing declaration or re-declaration. */ if (!is_friend || TREE_CODE (decl) != FUNCTION_DECL) check_default_tmpl_args (decl, current_template_parms, is_primary, is_partial, is_friend); /* Ensure that there are no parameter packs in the type of this declaration that have not been expanded. */ if (TREE_CODE (decl) == FUNCTION_DECL) { /* Check each of the arguments individually to see if there are any bare parameter packs. */ tree type = TREE_TYPE (decl); tree arg = DECL_ARGUMENTS (decl); tree argtype = TYPE_ARG_TYPES (type); while (arg && argtype) { if (!DECL_PACK_P (arg) && check_for_bare_parameter_packs (TREE_TYPE (arg))) { /* This is a PARM_DECL that contains unexpanded parameter packs. We have already complained about this in the check_for_bare_parameter_packs call, so just replace these types with ERROR_MARK_NODE. */ TREE_TYPE (arg) = error_mark_node; TREE_VALUE (argtype) = error_mark_node; } arg = DECL_CHAIN (arg); argtype = TREE_CHAIN (argtype); } /* Check for bare parameter packs in the return type and the exception specifiers. */ if (check_for_bare_parameter_packs (TREE_TYPE (type))) /* Errors were already issued, set return type to int as the frontend doesn't expect error_mark_node as the return type. */ TREE_TYPE (type) = integer_type_node; if (check_for_bare_parameter_packs (TYPE_RAISES_EXCEPTIONS (type))) TYPE_RAISES_EXCEPTIONS (type) = NULL_TREE; } else if (check_for_bare_parameter_packs (is_typedef_decl (decl) ? 
DECL_ORIGINAL_TYPE (decl) : TREE_TYPE (decl))) { TREE_TYPE (decl) = error_mark_node; return error_mark_node; } if (is_partial) return process_partial_specialization (decl); args = current_template_args (); if (!ctx || TREE_CODE (ctx) == FUNCTION_DECL || (CLASS_TYPE_P (ctx) && TYPE_BEING_DEFINED (ctx)) || (TREE_CODE (decl) == TYPE_DECL && LAMBDA_TYPE_P (TREE_TYPE (decl))) || (is_friend && !DECL_TEMPLATE_INFO (decl))) { if (DECL_LANG_SPECIFIC (decl) && DECL_TEMPLATE_INFO (decl) && DECL_TI_TEMPLATE (decl)) tmpl = DECL_TI_TEMPLATE (decl); /* If DECL is a TYPE_DECL for a class-template, then there won't be DECL_LANG_SPECIFIC. The information equivalent to DECL_TEMPLATE_INFO is found in TYPE_TEMPLATE_INFO instead. */ else if (DECL_IMPLICIT_TYPEDEF_P (decl) && TYPE_TEMPLATE_INFO (TREE_TYPE (decl)) && TYPE_TI_TEMPLATE (TREE_TYPE (decl))) { /* Since a template declaration already existed for this class-type, we must be redeclaring it here. Make sure that the redeclaration is valid. */ redeclare_class_template (TREE_TYPE (decl), current_template_parms, current_template_constraints ()); /* We don't need to create a new TEMPLATE_DECL; just use the one we already had. */ tmpl = TYPE_TI_TEMPLATE (TREE_TYPE (decl)); } else { tmpl = build_template_decl (decl, current_template_parms, member_template_p); new_template_p = 1; if (DECL_LANG_SPECIFIC (decl) && DECL_TEMPLATE_SPECIALIZATION (decl)) { /* A specialization of a member template of a template class. */ SET_DECL_TEMPLATE_SPECIALIZATION (tmpl); DECL_TEMPLATE_INFO (tmpl) = DECL_TEMPLATE_INFO (decl); DECL_TEMPLATE_INFO (decl) = NULL_TREE; } } } else { tree a, t, current, parms; int i; tree tinfo = get_template_info (decl); if (!tinfo) { error ("template definition of non-template %q#D", decl); return error_mark_node; } tmpl = TI_TEMPLATE (tinfo); if (DECL_FUNCTION_TEMPLATE_P (tmpl) && DECL_TEMPLATE_INFO (decl) && DECL_TI_ARGS (decl) && DECL_TEMPLATE_SPECIALIZATION (decl) && DECL_MEMBER_TEMPLATE_P (tmpl)) { tree new_tmpl; /* The declaration is a specialization of a member template, declared outside the class. Therefore, the innermost template arguments will be NULL, so we replace them with the arguments determined by the earlier call to check_explicit_specialization. */ args = DECL_TI_ARGS (decl); new_tmpl = build_template_decl (decl, current_template_parms, member_template_p); DECL_TEMPLATE_RESULT (new_tmpl) = decl; TREE_TYPE (new_tmpl) = TREE_TYPE (decl); DECL_TI_TEMPLATE (decl) = new_tmpl; SET_DECL_TEMPLATE_SPECIALIZATION (new_tmpl); DECL_TEMPLATE_INFO (new_tmpl) = build_template_info (tmpl, args); register_specialization (new_tmpl, most_general_template (tmpl), args, is_friend, 0); return decl; } /* Make sure the template headers we got make sense. */ parms = DECL_TEMPLATE_PARMS (tmpl); i = TMPL_PARMS_DEPTH (parms); if (TMPL_ARGS_DEPTH (args) != i) { error ("expected %d levels of template parms for %q#D, got %d", i, decl, TMPL_ARGS_DEPTH (args)); DECL_INTERFACE_KNOWN (decl) = 1; return error_mark_node; } else for (current = decl; i > 0; --i, parms = TREE_CHAIN (parms)) { a = TMPL_ARGS_LEVEL (args, i); t = INNERMOST_TEMPLATE_PARMS (parms); if (TREE_VEC_LENGTH (t) != TREE_VEC_LENGTH (a)) { if (current == decl) error ("got %d template parameters for %q#D", TREE_VEC_LENGTH (a), decl); else error ("got %d template parameters for %q#T", TREE_VEC_LENGTH (a), current); error (" but %d required", TREE_VEC_LENGTH (t)); /* Avoid crash in import_export_decl. 
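A typical way to reach the error above (illustrative) is a member template
definition whose header has the wrong number of parameters for one level:

	 template <class T> struct S { template <class U, class V> void f (); };
	 template <class T> template <class U> void S<T>::f () { }   // 1 given, 2 required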
*/ DECL_INTERFACE_KNOWN (decl) = 1; return error_mark_node; } if (current == decl) current = ctx; else if (current == NULL_TREE) /* Can happen in erroneous input. */ break; else current = get_containing_scope (current); } /* Check that the parms are used in the appropriate qualifying scopes in the declarator. */ if (!comp_template_args (TI_ARGS (tinfo), TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (tmpl))))) { error ("template arguments to %qD do not match original " "template %qD", decl, DECL_TEMPLATE_RESULT (tmpl)); if (!uses_template_parms (TI_ARGS (tinfo))) inform (input_location, "use %<template<>%> for" " an explicit specialization"); /* Avoid crash in import_export_decl. */ DECL_INTERFACE_KNOWN (decl) = 1; return error_mark_node; } } DECL_TEMPLATE_RESULT (tmpl) = decl; TREE_TYPE (tmpl) = TREE_TYPE (decl); /* Push template declarations for global functions and types. Note that we do not try to push a global template friend declared in a template class; such a thing may well depend on the template parameters of the class. */ if (new_template_p && !ctx && !(is_friend && template_class_depth (current_class_type) > 0)) { tmpl = pushdecl_namespace_level (tmpl, is_friend); if (tmpl == error_mark_node) return error_mark_node; /* Hide template friend classes that haven't been declared yet. */ if (is_friend && TREE_CODE (decl) == TYPE_DECL) { DECL_ANTICIPATED (tmpl) = 1; DECL_FRIEND_P (tmpl) = 1; } } if (is_primary) { tree parms = DECL_TEMPLATE_PARMS (tmpl); DECL_PRIMARY_TEMPLATE (tmpl) = tmpl; /* Give template template parms a DECL_CONTEXT of the template for which they are a parameter. */ parms = INNERMOST_TEMPLATE_PARMS (parms); for (int i = TREE_VEC_LENGTH (parms) - 1; i >= 0; --i) { tree parm = TREE_VALUE (TREE_VEC_ELT (parms, i)); if (TREE_CODE (parm) == TEMPLATE_DECL) DECL_CONTEXT (parm) = tmpl; } if (TREE_CODE (decl) == TYPE_DECL && TYPE_DECL_ALIAS_P (decl)) { if (tree constr = TEMPLATE_PARMS_CONSTRAINTS (DECL_TEMPLATE_PARMS (tmpl))) { /* ??? Why don't we do this here for all templates? */ constr = build_constraints (constr, NULL_TREE); set_constraints (decl, constr); } if (complex_alias_template_p (tmpl)) TEMPLATE_DECL_COMPLEX_ALIAS_P (tmpl) = true; } } /* The DECL_TI_ARGS of DECL contains full set of arguments referring back to its most general template. If TMPL is a specialization, ARGS may only have the innermost set of arguments. Add the missing argument levels if necessary. */ if (DECL_TEMPLATE_INFO (tmpl)) args = add_outermost_template_args (DECL_TI_ARGS (tmpl), args); info = build_template_info (tmpl, args); if (DECL_IMPLICIT_TYPEDEF_P (decl)) SET_TYPE_TEMPLATE_INFO (TREE_TYPE (tmpl), info); else { if (is_primary) retrofit_lang_decl (decl); if (DECL_LANG_SPECIFIC (decl)) DECL_TEMPLATE_INFO (decl) = info; } if (flag_implicit_templates && !is_friend && TREE_PUBLIC (decl) && VAR_OR_FUNCTION_DECL_P (decl)) /* Set DECL_COMDAT on template instantiations; if we force them to be emitted by explicit instantiation, mark_needed will tell cgraph to do the right thing. */ DECL_COMDAT (decl) = true; return DECL_TEMPLATE_RESULT (tmpl); } tree push_template_decl (tree decl) { return push_template_decl_real (decl, false); } /* FN is an inheriting constructor that inherits from the constructor template INHERITED; turn FN into a constructor template with a matching template header. 
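For example (illustrative):

	 struct B { template <class T> B (T); };
	 struct D : B { using B::B; };   // D's inherited constructor needs B's header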
*/ tree add_inherited_template_parms (tree fn, tree inherited) { tree inner_parms = INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (inherited)); inner_parms = copy_node (inner_parms); tree parms = tree_cons (size_int (processing_template_decl + 1), inner_parms, current_template_parms); tree tmpl = build_template_decl (fn, parms, /*member*/true); tree args = template_parms_to_args (parms); DECL_TEMPLATE_INFO (fn) = build_template_info (tmpl, args); TREE_TYPE (tmpl) = TREE_TYPE (fn); DECL_TEMPLATE_RESULT (tmpl) = fn; DECL_ARTIFICIAL (tmpl) = true; DECL_PRIMARY_TEMPLATE (tmpl) = tmpl; return tmpl; } /* Called when a class template TYPE is redeclared with the indicated template PARMS, e.g.: template <class T> struct S; template <class T> struct S {}; */ bool redeclare_class_template (tree type, tree parms, tree cons) { tree tmpl; tree tmpl_parms; int i; if (!TYPE_TEMPLATE_INFO (type)) { error ("%qT is not a template type", type); return false; } tmpl = TYPE_TI_TEMPLATE (type); if (!PRIMARY_TEMPLATE_P (tmpl)) /* The type is nested in some template class. Nothing to worry about here; there are no new template parameters for the nested type. */ return true; if (!parms) { error ("template specifiers not specified in declaration of %qD", tmpl); return false; } parms = INNERMOST_TEMPLATE_PARMS (parms); tmpl_parms = DECL_INNERMOST_TEMPLATE_PARMS (tmpl); if (TREE_VEC_LENGTH (parms) != TREE_VEC_LENGTH (tmpl_parms)) { error_n (input_location, TREE_VEC_LENGTH (parms), "redeclared with %d template parameter", "redeclared with %d template parameters", TREE_VEC_LENGTH (parms)); inform_n (DECL_SOURCE_LOCATION (tmpl), TREE_VEC_LENGTH (tmpl_parms), "previous declaration %qD used %d template parameter", "previous declaration %qD used %d template parameters", tmpl, TREE_VEC_LENGTH (tmpl_parms)); return false; } for (i = 0; i < TREE_VEC_LENGTH (tmpl_parms); ++i) { tree tmpl_parm; tree parm; tree tmpl_default; tree parm_default; if (TREE_VEC_ELT (tmpl_parms, i) == error_mark_node || TREE_VEC_ELT (parms, i) == error_mark_node) continue; tmpl_parm = TREE_VALUE (TREE_VEC_ELT (tmpl_parms, i)); if (error_operand_p (tmpl_parm)) return false; parm = TREE_VALUE (TREE_VEC_ELT (parms, i)); tmpl_default = TREE_PURPOSE (TREE_VEC_ELT (tmpl_parms, i)); parm_default = TREE_PURPOSE (TREE_VEC_ELT (parms, i)); /* TMPL_PARM and PARM can be either TYPE_DECL, PARM_DECL, or TEMPLATE_DECL. */ if (TREE_CODE (tmpl_parm) != TREE_CODE (parm) || (TREE_CODE (tmpl_parm) != TYPE_DECL && !same_type_p (TREE_TYPE (tmpl_parm), TREE_TYPE (parm))) || (TREE_CODE (tmpl_parm) != PARM_DECL && (TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (tmpl_parm)) != TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (parm)))) || (TREE_CODE (tmpl_parm) == PARM_DECL && (TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (tmpl_parm)) != TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm))))) { auto_diagnostic_group d; error ("template parameter %q+#D", tmpl_parm); inform (input_location, "redeclared here as %q#D", parm); return false; } /* The parameters can be declared to introduce different constraints. 
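For example (illustrative, assuming concepts C1 and C2 have been declared):

	 template <C1 T> struct S;
	 template <C2 T> struct S;   // rejected below unless C1 and C2 are equivalent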
*/ tree p1 = TREE_VEC_ELT (tmpl_parms, i); tree p2 = TREE_VEC_ELT (parms, i); if (!template_parameter_constraints_equivalent_p (p1, p2)) { auto_diagnostic_group d; error ("declaration of template parameter %q+#D with different " "constraints", parm); inform (DECL_SOURCE_LOCATION (tmpl_parm), "original declaration appeared here"); return false; } if (tmpl_default != NULL_TREE && parm_default != NULL_TREE) { /* We have in [temp.param]: A template-parameter may not be given default arguments by two different declarations in the same scope. */ auto_diagnostic_group d; error_at (input_location, "redefinition of default argument for %q#D", parm); inform (DECL_SOURCE_LOCATION (tmpl_parm), "original definition appeared here"); return false; } if (parm_default != NULL_TREE) /* Update the previous template parameters (which are the ones that will really count) with the new default value. */ TREE_PURPOSE (TREE_VEC_ELT (tmpl_parms, i)) = parm_default; else if (tmpl_default != NULL_TREE) /* Update the new parameters, too; they'll be used as the parameters for any members. */ TREE_PURPOSE (TREE_VEC_ELT (parms, i)) = tmpl_default; /* Give each template template parm in this redeclaration a DECL_CONTEXT of the template for which they are a parameter. */ if (TREE_CODE (parm) == TEMPLATE_DECL) { gcc_assert (DECL_CONTEXT (parm) == NULL_TREE); DECL_CONTEXT (parm) = tmpl; } if (TREE_CODE (parm) == TYPE_DECL) TEMPLATE_TYPE_PARM_FOR_CLASS (TREE_TYPE (parm)) = true; } tree ci = get_constraints (tmpl); tree req1 = ci ? CI_TEMPLATE_REQS (ci) : NULL_TREE; tree req2 = cons ? CI_TEMPLATE_REQS (cons) : NULL_TREE; /* Two classes with different constraints declare different entities. */ if (!cp_tree_equal (req1, req2)) { auto_diagnostic_group d; error_at (input_location, "redeclaration %q#D with different " "constraints", tmpl); inform (DECL_SOURCE_LOCATION (tmpl), "original declaration appeared here"); return false; } return true; } /* The actual substitution part of instantiate_non_dependent_expr_sfinae, to be used when the caller has already checked (processing_template_decl && !instantiation_dependent_expression_p (expr) && potential_constant_expression (expr)) and cleared processing_template_decl. */ tree instantiate_non_dependent_expr_internal (tree expr, tsubst_flags_t complain) { return tsubst_copy_and_build (expr, /*args=*/NULL_TREE, complain, /*in_decl=*/NULL_TREE, /*function_p=*/false, /*integral_constant_expression_p=*/true); } /* Simplify EXPR if it is a non-dependent expression. Returns the (possibly simplified) expression. */ tree instantiate_non_dependent_expr_sfinae (tree expr, tsubst_flags_t complain) { if (expr == NULL_TREE) return NULL_TREE; /* If we're in a template, but EXPR isn't value dependent, simplify it. We're supposed to treat: template <typename T> void f(T[1 + 1]); template <typename T> void f(T[2]); as two declarations of the same function, for example. */ if (processing_template_decl && is_nondependent_constant_expression (expr)) { processing_template_decl_sentinel s; expr = instantiate_non_dependent_expr_internal (expr, complain); } return expr; } tree instantiate_non_dependent_expr (tree expr) { return instantiate_non_dependent_expr_sfinae (expr, tf_error); } /* Like instantiate_non_dependent_expr, but return NULL_TREE rather than an uninstantiated expression. 
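   For illustration (assumed example): in the body of

     template <class T> void f ();

   an expression such as sizeof (int) + 1 is a non-dependent constant
   expression and is folded to its value, whereas sizeof (T) is dependent
   and yields NULL_TREE here.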
*/ tree instantiate_non_dependent_or_null (tree expr) { if (expr == NULL_TREE) return NULL_TREE; if (processing_template_decl) { if (!is_nondependent_constant_expression (expr)) expr = NULL_TREE; else { processing_template_decl_sentinel s; expr = instantiate_non_dependent_expr_internal (expr, tf_error); } } return expr; } /* True iff T is a specialization of a variable template. */ bool variable_template_specialization_p (tree t) { if (!VAR_P (t) || !DECL_LANG_SPECIFIC (t) || !DECL_TEMPLATE_INFO (t)) return false; tree tmpl = DECL_TI_TEMPLATE (t); return variable_template_p (tmpl); } /* Return TRUE iff T is a type alias, a TEMPLATE_DECL for an alias template declaration, or a TYPE_DECL for an alias declaration. */ bool alias_type_or_template_p (tree t) { if (t == NULL_TREE) return false; return ((TREE_CODE (t) == TYPE_DECL && TYPE_DECL_ALIAS_P (t)) || (TYPE_P (t) && TYPE_NAME (t) && TYPE_DECL_ALIAS_P (TYPE_NAME (t))) || DECL_ALIAS_TEMPLATE_P (t)); } /* If T is a specialization of an alias template, return it; otherwise return NULL_TREE. If TRANSPARENT_TYPEDEFS is true, look through other aliases. */ tree alias_template_specialization_p (const_tree t, bool transparent_typedefs) { if (!TYPE_P (t)) return NULL_TREE; /* It's an alias template specialization if it's an alias and its TYPE_NAME is a specialization of a primary template. */ if (typedef_variant_p (t)) { if (tree tinfo = TYPE_ALIAS_TEMPLATE_INFO (t)) if (PRIMARY_TEMPLATE_P (TI_TEMPLATE (tinfo))) return CONST_CAST_TREE (t); if (transparent_typedefs) return alias_template_specialization_p (DECL_ORIGINAL_TYPE (TYPE_NAME (t)), transparent_typedefs); } return NULL_TREE; } /* An alias template is complex from a SFINAE perspective if a template-id using that alias can be ill-formed when the expansion is not, as with the void_t template. We determine this by checking whether the expansion for the alias template uses all its template parameters. */ struct uses_all_template_parms_data { int level; bool *seen; }; static int uses_all_template_parms_r (tree t, void *data_) { struct uses_all_template_parms_data &data = *(struct uses_all_template_parms_data*)data_; tree idx = get_template_parm_index (t); if (TEMPLATE_PARM_LEVEL (idx) == data.level) data.seen[TEMPLATE_PARM_IDX (idx)] = true; return 0; } /* for_each_template_parm any_fn callback for complex_alias_template_p. */ static int complex_pack_expansion_r (tree t, void *data_) { /* An alias template with a pack expansion that expands a pack from the enclosing class needs to be considered complex, to avoid confusion with the same pack being used as an argument to the alias's own template parameter (91966). */ if (!PACK_EXPANSION_P (t)) return 0; struct uses_all_template_parms_data &data = *(struct uses_all_template_parms_data*)data_; for (tree pack = PACK_EXPANSION_PARAMETER_PACKS (t); pack; pack = TREE_CHAIN (pack)) { tree parm_pack = TREE_VALUE (pack); if (!TEMPLATE_PARM_P (parm_pack)) continue; int idx, level; template_parm_level_and_index (parm_pack, &level, &idx); if (level < data.level) return 1; } return 0; } static bool complex_alias_template_p (const_tree tmpl) { /* A renaming alias isn't complex. */ if (get_underlying_template (CONST_CAST_TREE (tmpl)) != tmpl) return false; /* Any other constrained alias is complex. 
*/ if (get_constraints (tmpl)) return true; struct uses_all_template_parms_data data; tree pat = DECL_ORIGINAL_TYPE (DECL_TEMPLATE_RESULT (tmpl)); tree parms = DECL_TEMPLATE_PARMS (tmpl); data.level = TMPL_PARMS_DEPTH (parms); int len = TREE_VEC_LENGTH (INNERMOST_TEMPLATE_PARMS (parms)); data.seen = XALLOCAVEC (bool, len); for (int i = 0; i < len; ++i) data.seen[i] = false; if (for_each_template_parm (pat, uses_all_template_parms_r, &data, NULL, true, complex_pack_expansion_r)) return true; for (int i = 0; i < len; ++i) if (!data.seen[i]) return true; return false; } /* If T is a specialization of a complex alias template with dependent template-arguments, return it; otherwise return NULL_TREE. If T is a typedef to such a specialization, return the specialization. */ tree dependent_alias_template_spec_p (const_tree t, bool transparent_typedefs) { if (!TYPE_P (t) || !typedef_variant_p (t)) return NULL_TREE; tree tinfo = TYPE_ALIAS_TEMPLATE_INFO (t); if (tinfo && TEMPLATE_DECL_COMPLEX_ALIAS_P (TI_TEMPLATE (tinfo)) && (any_dependent_template_arguments_p (INNERMOST_TEMPLATE_ARGS (TI_ARGS (tinfo))))) return CONST_CAST_TREE (t); if (transparent_typedefs) { tree utype = DECL_ORIGINAL_TYPE (TYPE_NAME (t)); return dependent_alias_template_spec_p (utype, transparent_typedefs); } return NULL_TREE; } /* Return the number of innermost template parameters in TMPL. */ static int num_innermost_template_parms (const_tree tmpl) { tree parms = INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (tmpl)); return TREE_VEC_LENGTH (parms); } /* Return either TMPL or another template that it is equivalent to under DR 1286: An alias that just changes the name of a template is equivalent to the other template. */ static tree get_underlying_template (tree tmpl) { gcc_assert (TREE_CODE (tmpl) == TEMPLATE_DECL); while (DECL_ALIAS_TEMPLATE_P (tmpl)) { /* Determine if the alias is equivalent to an underlying template. */ tree orig_type = DECL_ORIGINAL_TYPE (DECL_TEMPLATE_RESULT (tmpl)); /* The underlying type may have been ill-formed. Don't proceed. */ if (!orig_type) break; tree tinfo = TYPE_TEMPLATE_INFO_MAYBE_ALIAS (orig_type); if (!tinfo) break; tree underlying = TI_TEMPLATE (tinfo); if (!PRIMARY_TEMPLATE_P (underlying) || (num_innermost_template_parms (tmpl) != num_innermost_template_parms (underlying))) break; tree alias_args = INNERMOST_TEMPLATE_ARGS (generic_targs_for (tmpl)); if (!comp_template_args (TI_ARGS (tinfo), alias_args)) break; /* If TMPL adds or changes any constraints, it isn't equivalent. I think it's appropriate to treat a less-constrained alias as equivalent. */ if (!at_least_as_constrained (underlying, tmpl)) break; /* Alias is equivalent. Strip it and repeat. */ tmpl = underlying; } return tmpl; } /* Subroutine of convert_nontype_argument. Converts EXPR to TYPE, which must be a reference-to-function or a pointer-to-function type, as specified in [temp.arg.nontype]: disambiguate EXPR if it is an overload set, and check that the resulting function has external linkage. 
*/ static tree convert_nontype_argument_function (tree type, tree expr, tsubst_flags_t complain) { tree fns = expr; tree fn, fn_no_ptr; linkage_kind linkage; fn = instantiate_type (type, fns, tf_none); if (fn == error_mark_node) return error_mark_node; if (value_dependent_expression_p (fn)) goto accept; fn_no_ptr = strip_fnptr_conv (fn); if (TREE_CODE (fn_no_ptr) == ADDR_EXPR) fn_no_ptr = TREE_OPERAND (fn_no_ptr, 0); if (BASELINK_P (fn_no_ptr)) fn_no_ptr = BASELINK_FUNCTIONS (fn_no_ptr); /* [temp.arg.nontype]/1 A template-argument for a non-type, non-template template-parameter shall be one of: [...] -- the address of an object or function with external [C++11: or internal] linkage. */ STRIP_ANY_LOCATION_WRAPPER (fn_no_ptr); if (TREE_CODE (fn_no_ptr) != FUNCTION_DECL) { if (complain & tf_error) { location_t loc = cp_expr_loc_or_input_loc (expr); error_at (loc, "%qE is not a valid template argument for type %qT", expr, type); if (TYPE_PTR_P (type)) inform (loc, "it must be the address of a function " "with external linkage"); else inform (loc, "it must be the name of a function with " "external linkage"); } return NULL_TREE; } linkage = decl_linkage (fn_no_ptr); if (cxx_dialect >= cxx11 ? linkage == lk_none : linkage != lk_external) { if (complain & tf_error) { location_t loc = cp_expr_loc_or_input_loc (expr); if (cxx_dialect >= cxx11) error_at (loc, "%qE is not a valid template argument for type " "%qT because %qD has no linkage", expr, type, fn_no_ptr); else error_at (loc, "%qE is not a valid template argument for type " "%qT because %qD does not have external linkage", expr, type, fn_no_ptr); } return NULL_TREE; } accept: if (TYPE_REF_P (type)) { if (REFERENCE_REF_P (fn)) fn = TREE_OPERAND (fn, 0); else fn = build_address (fn); } if (!same_type_ignoring_top_level_qualifiers_p (type, TREE_TYPE (fn))) fn = build_nop (type, fn); return fn; } /* Subroutine of convert_nontype_argument. Check if EXPR of type TYPE is a valid pointer-to-member constant. Emit an error otherwise. */ static bool check_valid_ptrmem_cst_expr (tree type, tree expr, tsubst_flags_t complain) { tree orig_expr = expr; STRIP_NOPS (expr); if (null_ptr_cst_p (expr)) return true; if (TREE_CODE (expr) == PTRMEM_CST && same_type_p (TYPE_PTRMEM_CLASS_TYPE (type), PTRMEM_CST_CLASS (expr))) return true; if (cxx_dialect >= cxx11 && null_member_pointer_value_p (expr)) return true; if (processing_template_decl && TREE_CODE (expr) == ADDR_EXPR && TREE_CODE (TREE_OPERAND (expr, 0)) == OFFSET_REF) return true; if (complain & tf_error) { location_t loc = cp_expr_loc_or_input_loc (orig_expr); error_at (loc, "%qE is not a valid template argument for type %qT", orig_expr, type); if (TREE_CODE (expr) != PTRMEM_CST) inform (loc, "it must be a pointer-to-member of the form %<&X::Y%>"); else inform (loc, "because it is a member of %qT", PTRMEM_CST_CLASS (expr)); } return false; } /* Returns TRUE iff the address of OP is value-dependent. 14.6.2.4 [temp.dep.temp]: A non-integral non-type template-argument is dependent if its type is dependent or it has either of the following forms qualified-id & qualified-id and contains a nested-name-specifier which specifies a class-name that names a dependent type. We generalize this to just say that the address of a member of a dependent class is value-dependent; the above doesn't cover the address of a static data member named with an unqualified-id. 
*/ static bool has_value_dependent_address (tree op) { STRIP_ANY_LOCATION_WRAPPER (op); /* We could use get_inner_reference here, but there's no need; this is only relevant for template non-type arguments, which can only be expressed as &id-expression. */ if (DECL_P (op)) { tree ctx = CP_DECL_CONTEXT (op); if (TYPE_P (ctx) && dependent_type_p (ctx)) return true; } return false; } /* The next set of functions are used for providing helpful explanatory diagnostics for failed overload resolution. Their messages should be indented by two spaces for consistency with the messages in call.c */ static int unify_success (bool /*explain_p*/) { return 0; } /* Other failure functions should call this one, to provide a single function for setting a breakpoint on. */ static int unify_invalid (bool /*explain_p*/) { return 1; } static int unify_parameter_deduction_failure (bool explain_p, tree parm) { if (explain_p) inform (input_location, " couldn%'t deduce template parameter %qD", parm); return unify_invalid (explain_p); } static int unify_cv_qual_mismatch (bool explain_p, tree parm, tree arg) { if (explain_p) inform (input_location, " types %qT and %qT have incompatible cv-qualifiers", parm, arg); return unify_invalid (explain_p); } static int unify_type_mismatch (bool explain_p, tree parm, tree arg) { if (explain_p) inform (input_location, " mismatched types %qT and %qT", parm, arg); return unify_invalid (explain_p); } static int unify_parameter_pack_mismatch (bool explain_p, tree parm, tree arg) { if (explain_p) inform (input_location, " template parameter %qD is not a parameter pack, but " "argument %qD is", parm, arg); return unify_invalid (explain_p); } static int unify_ptrmem_cst_mismatch (bool explain_p, tree parm, tree arg) { if (explain_p) inform (input_location, " template argument %qE does not match " "pointer-to-member constant %qE", arg, parm); return unify_invalid (explain_p); } static int unify_expression_unequal (bool explain_p, tree parm, tree arg) { if (explain_p) inform (input_location, " %qE is not equivalent to %qE", parm, arg); return unify_invalid (explain_p); } static int unify_parameter_pack_inconsistent (bool explain_p, tree old_arg, tree new_arg) { if (explain_p) inform (input_location, " inconsistent parameter pack deduction with %qT and %qT", old_arg, new_arg); return unify_invalid (explain_p); } static int unify_inconsistency (bool explain_p, tree parm, tree first, tree second) { if (explain_p) { if (TYPE_P (parm)) inform (input_location, " deduced conflicting types for parameter %qT (%qT and %qT)", parm, first, second); else inform (input_location, " deduced conflicting values for non-type parameter " "%qE (%qE and %qE)", parm, first, second); } return unify_invalid (explain_p); } static int unify_vla_arg (bool explain_p, tree arg) { if (explain_p) inform (input_location, " variable-sized array type %qT is not " "a valid template argument", arg); return unify_invalid (explain_p); } static int unify_method_type_error (bool explain_p, tree arg) { if (explain_p) inform (input_location, " member function type %qT is not a valid template argument", arg); return unify_invalid (explain_p); } static int unify_arity (bool explain_p, int have, int wanted, bool least_p = false) { if (explain_p) { if (least_p) inform_n (input_location, wanted, " candidate expects at least %d argument, %d provided", " candidate expects at least %d arguments, %d provided", wanted, have); else inform_n (input_location, wanted, " candidate expects %d argument, %d provided", " candidate expects %d 
arguments, %d provided", wanted, have); } return unify_invalid (explain_p); } static int unify_too_many_arguments (bool explain_p, int have, int wanted) { return unify_arity (explain_p, have, wanted); } static int unify_too_few_arguments (bool explain_p, int have, int wanted, bool least_p = false) { return unify_arity (explain_p, have, wanted, least_p); } static int unify_arg_conversion (bool explain_p, tree to_type, tree from_type, tree arg) { if (explain_p) inform (cp_expr_loc_or_input_loc (arg), " cannot convert %qE (type %qT) to type %qT", arg, from_type, to_type); return unify_invalid (explain_p); } static int unify_no_common_base (bool explain_p, enum template_base_result r, tree parm, tree arg) { if (explain_p) switch (r) { case tbr_ambiguous_baseclass: inform (input_location, " %qT is an ambiguous base class of %qT", parm, arg); break; default: inform (input_location, " %qT is not derived from %qT", arg, parm); break; } return unify_invalid (explain_p); } static int unify_inconsistent_template_template_parameters (bool explain_p) { if (explain_p) inform (input_location, " template parameters of a template template argument are " "inconsistent with other deduced template arguments"); return unify_invalid (explain_p); } static int unify_template_deduction_failure (bool explain_p, tree parm, tree arg) { if (explain_p) inform (input_location, " cannot deduce a template for %qT from non-template type %qT", parm, arg); return unify_invalid (explain_p); } static int unify_template_argument_mismatch (bool explain_p, tree parm, tree arg) { if (explain_p) inform (input_location, " template argument %qE does not match %qE", arg, parm); return unify_invalid (explain_p); } /* True if T is a C++20 template parameter object to store the argument for a template parameter of class type. */ bool template_parm_object_p (const_tree t) { return (TREE_CODE (t) == VAR_DECL && DECL_ARTIFICIAL (t) && DECL_NAME (t) && !strncmp (IDENTIFIER_POINTER (DECL_NAME (t)), "_ZTA", 4)); } /* Subroutine of convert_nontype_argument, to check whether EXPR, as an argument for TYPE, points to an unsuitable object. 
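   For illustration (assumed example):

     template <const int *P> struct S { };
     int global;
     S<&global> ok;        // a variable with linkage and static storage
     void f ()
     {
       static int local;
       S<&local> s;        // rejected before C++17: local has no linkage
     }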
*/ static bool invalid_tparm_referent_p (tree type, tree expr, tsubst_flags_t complain) { switch (TREE_CODE (expr)) { CASE_CONVERT: return invalid_tparm_referent_p (type, TREE_OPERAND (expr, 0), complain); case TARGET_EXPR: return invalid_tparm_referent_p (type, TARGET_EXPR_INITIAL (expr), complain); case CONSTRUCTOR: { unsigned i; tree elt; FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (expr), i, elt) if (invalid_tparm_referent_p (TREE_TYPE (elt), elt, complain)) return true; } break; case ADDR_EXPR: { tree decl = TREE_OPERAND (expr, 0); if (!VAR_P (decl)) { if (complain & tf_error) error_at (cp_expr_loc_or_input_loc (expr), "%qE is not a valid template argument of type %qT " "because %qE is not a variable", expr, type, decl); return true; } else if (cxx_dialect < cxx11 && !DECL_EXTERNAL_LINKAGE_P (decl)) { if (complain & tf_error) error_at (cp_expr_loc_or_input_loc (expr), "%qE is not a valid template argument of type %qT " "in C++98 because %qD does not have external linkage", expr, type, decl); return true; } else if ((cxx_dialect >= cxx11 && cxx_dialect < cxx17) && decl_linkage (decl) == lk_none) { if (complain & tf_error) error_at (cp_expr_loc_or_input_loc (expr), "%qE is not a valid template argument of type %qT " "because %qD has no linkage", expr, type, decl); return true; } /* C++17: For a non-type template-parameter of reference or pointer type, the value of the constant expression shall not refer to (or for a pointer type, shall not be the address of): * a subobject (4.5), * a temporary object (15.2), * a string literal (5.13.5), * the result of a typeid expression (8.2.8), or * a predefined __func__ variable (11.4.1). */ else if (DECL_ARTIFICIAL (decl)) { if (complain & tf_error) error ("the address of %qD is not a valid template argument", decl); return true; } else if (!same_type_ignoring_top_level_qualifiers_p (strip_array_types (TREE_TYPE (type)), strip_array_types (TREE_TYPE (decl)))) { if (complain & tf_error) error ("the address of the %qT subobject of %qD is not a " "valid template argument", TREE_TYPE (type), decl); return true; } else if (!TREE_STATIC (decl) && !DECL_EXTERNAL (decl)) { if (complain & tf_error) error ("the address of %qD is not a valid template argument " "because it does not have static storage duration", decl); return true; } } break; default: if (!INDIRECT_TYPE_P (type)) /* We're only concerned about pointers and references here. */; else if (cxx_dialect >= cxx11 && integer_zerop (expr)) /* Null pointer values are OK in C++11. */; else { if (VAR_P (expr)) { if (complain & tf_error) error ("%qD is not a valid template argument " "because %qD is a variable, not the address of " "a variable", expr, expr); return true; } else { if (complain & tf_error) error ("%qE is not a valid template argument for %qT " "because it is not the address of a variable", expr, type); return true; } } } return false; } /* The template arguments corresponding to template parameter objects of types that contain pointers to members. */ static GTY(()) hash_map<tree, tree> *tparm_obj_values; /* Return a VAR_DECL for the C++20 template parameter object corresponding to template argument EXPR. 
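   For illustration (assumed C++20 example):

     struct A { int i; };
     template <A V> struct S { };
     S<A{42}> s;   // A{42} is materialized as a constexpr VAR_DECL whose
                   // mangled name starts with _ZTA; every use of S<A{42}>
                   // refers to that same object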
*/ static tree get_template_parm_object (tree expr, tsubst_flags_t complain) { if (TREE_CODE (expr) == TARGET_EXPR) expr = TARGET_EXPR_INITIAL (expr); if (!TREE_CONSTANT (expr)) { if ((complain & tf_error) && require_rvalue_constant_expression (expr)) cxx_constant_value (expr); return error_mark_node; } if (invalid_tparm_referent_p (TREE_TYPE (expr), expr, complain)) return error_mark_node; tree name = mangle_template_parm_object (expr); tree decl = get_global_binding (name); if (decl) return decl; tree type = cp_build_qualified_type (TREE_TYPE (expr), TYPE_QUAL_CONST); decl = create_temporary_var (type); TREE_STATIC (decl) = true; DECL_DECLARED_CONSTEXPR_P (decl) = true; TREE_READONLY (decl) = true; DECL_NAME (decl) = name; SET_DECL_ASSEMBLER_NAME (decl, name); DECL_CONTEXT (decl) = global_namespace; comdat_linkage (decl); if (!zero_init_p (type)) { /* If EXPR contains any PTRMEM_CST, they will get clobbered by lower_var_init before we're done mangling. So store the original value elsewhere. */ tree copy = unshare_constructor (expr); hash_map_safe_put<hm_ggc> (tparm_obj_values, decl, copy); } pushdecl_top_level_and_finish (decl, expr); return decl; } /* Return the actual template argument corresponding to template parameter object VAR. */ tree tparm_object_argument (tree var) { if (zero_init_p (TREE_TYPE (var))) return DECL_INITIAL (var); return *(tparm_obj_values->get (var)); } /* Attempt to convert the non-type template parameter EXPR to the indicated TYPE. If the conversion is successful, return the converted value. If the conversion is unsuccessful, return NULL_TREE if we issued an error message, or error_mark_node if we did not. We issue error messages for out-and-out bad template parameters, but not simply because the conversion failed, since we might be just trying to do argument deduction. Both TYPE and EXPR must be non-dependent. The conversion follows the special rules described in [temp.arg.nontype], and it is much more strict than an implicit conversion. This function is called twice for each template argument (see lookup_template_class for a more accurate description of this problem). This means that we need to handle expressions which are not valid in a C++ source, but can be created from the first call (for instance, casts to perform conversions). These hacks can go away after we fix the double coercion problem. */ static tree convert_nontype_argument (tree type, tree expr, tsubst_flags_t complain) { tree expr_type; location_t loc = cp_expr_loc_or_input_loc (expr); /* Detect immediately string literals as invalid non-type argument. This special-case is not needed for correctness (we would easily catch this later), but only to provide better diagnostic for this common user mistake. As suggested by DR 100, we do not mention linkage issues in the diagnostic as this is not the point. */ if (TREE_CODE (expr) == STRING_CST && !CLASS_TYPE_P (type)) { if (complain & tf_error) error ("%qE is not a valid template argument for type %qT " "because string literals can never be used in this context", expr, type); return NULL_TREE; } /* Add the ADDR_EXPR now for the benefit of value_dependent_expression_p. */ if (TYPE_PTROBV_P (type) && TREE_CODE (TREE_TYPE (expr)) == ARRAY_TYPE) { expr = decay_conversion (expr, complain); if (expr == error_mark_node) return error_mark_node; } /* If we are in a template, EXPR may be non-dependent, but still have a syntactic, rather than semantic, form. For example, EXPR might be a SCOPE_REF, rather than the VAR_DECL to which the SCOPE_REF refers. 
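   (Illustrative assumed example: given

      struct A { static const int x = 3; };

   an argument written as A::x can arrive here as a SCOPE_REF naming A and
   x instead of the VAR_DECL for x.)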
Preserving the qualifying scope is necessary so that access checking can be performed when the template is instantiated -- but here we need the resolved form so that we can convert the argument. */ bool non_dep = false; if (TYPE_REF_OBJ_P (type) && has_value_dependent_address (expr)) /* If we want the address and it's value-dependent, don't fold. */; else if (processing_template_decl && is_nondependent_constant_expression (expr)) non_dep = true; if (error_operand_p (expr)) return error_mark_node; expr_type = TREE_TYPE (expr); /* If the argument is non-dependent, perform any conversions in non-dependent context as well. */ processing_template_decl_sentinel s (non_dep); if (non_dep) expr = instantiate_non_dependent_expr_internal (expr, complain); const bool val_dep_p = value_dependent_expression_p (expr); if (val_dep_p) expr = canonicalize_expr_argument (expr, complain); /* 14.3.2/5: The null pointer{,-to-member} conversion is applied to a non-type argument of "nullptr". */ if (NULLPTR_TYPE_P (expr_type) && TYPE_PTR_OR_PTRMEM_P (type)) expr = fold_simple (convert (type, expr)); /* In C++11, integral or enumeration non-type template arguments can be arbitrary constant expressions. Pointer and pointer to member arguments can be general constant expressions that evaluate to a null value, but otherwise still need to be of a specific form. */ if (cxx_dialect >= cxx11) { if (TREE_CODE (expr) == PTRMEM_CST && TYPE_PTRMEM_P (type)) /* A PTRMEM_CST is already constant, and a valid template argument for a parameter of pointer to member type, we just want to leave it in that form rather than lower it to a CONSTRUCTOR. */; else if (INTEGRAL_OR_ENUMERATION_TYPE_P (type) || cxx_dialect >= cxx17) { /* C++17: A template-argument for a non-type template-parameter shall be a converted constant expression (8.20) of the type of the template-parameter. */ expr = build_converted_constant_expr (type, expr, complain); if (expr == error_mark_node) /* Make sure we return NULL_TREE only if we have really issued an error, as described above. */ return (complain & tf_error) ? NULL_TREE : error_mark_node; else if (TREE_CODE (expr) == IMPLICIT_CONV_EXPR) { IMPLICIT_CONV_EXPR_NONTYPE_ARG (expr) = true; return expr; } expr = maybe_constant_value (expr, NULL_TREE, /*manifestly_const_eval=*/true); expr = convert_from_reference (expr); } else if (TYPE_PTR_OR_PTRMEM_P (type)) { tree folded = maybe_constant_value (expr, NULL_TREE, /*manifestly_const_eval=*/true); if (TYPE_PTR_P (type) ? integer_zerop (folded) : null_member_pointer_value_p (folded)) expr = folded; } } if (TYPE_REF_P (type)) expr = mark_lvalue_use (expr); else expr = mark_rvalue_use (expr); /* HACK: Due to double coercion, we can get a NOP_EXPR<REFERENCE_TYPE>(ADDR_EXPR<POINTER_TYPE> (arg)) here, which is the tree that we built on the first call (see below when coercing to reference to object or to reference to function). We just strip everything and get to the arg. See g++.old-deja/g++.oliva/template4.C and g++.dg/template/nontype9.C for examples. */ if (TYPE_REF_OBJ_P (type) || TYPE_REFFN_P (type)) { tree probe_type, probe = expr; if (REFERENCE_REF_P (probe)) probe = TREE_OPERAND (probe, 0); probe_type = TREE_TYPE (probe); if (TREE_CODE (probe) == NOP_EXPR) { /* ??? Maybe we could use convert_from_reference here, but we would need to relax its constraints because the NOP_EXPR could actually change the type to something more cv-qualified, and this is not folded by convert_from_reference. 
*/ tree addr = TREE_OPERAND (probe, 0); if (TYPE_REF_P (probe_type) && TREE_CODE (addr) == ADDR_EXPR && TYPE_PTR_P (TREE_TYPE (addr)) && (same_type_ignoring_top_level_qualifiers_p (TREE_TYPE (probe_type), TREE_TYPE (TREE_TYPE (addr))))) { expr = TREE_OPERAND (addr, 0); expr_type = TREE_TYPE (probe_type); } } } /* [temp.arg.nontype]/5, bullet 1 For a non-type template-parameter of integral or enumeration type, integral promotions (_conv.prom_) and integral conversions (_conv.integral_) are applied. */ if (INTEGRAL_OR_ENUMERATION_TYPE_P (type)) { if (cxx_dialect < cxx11) { tree t = build_converted_constant_expr (type, expr, complain); t = maybe_constant_value (t); if (t != error_mark_node) expr = t; } if (!same_type_ignoring_top_level_qualifiers_p (type, TREE_TYPE (expr))) return error_mark_node; /* Notice that there are constant expressions like '4 % 0' which do not fold into integer constants. */ if (TREE_CODE (expr) != INTEGER_CST && !val_dep_p) { if (complain & tf_error) { int errs = errorcount, warns = warningcount + werrorcount; if (!require_potential_constant_expression (expr)) expr = error_mark_node; else expr = cxx_constant_value (expr); if (errorcount > errs || warningcount + werrorcount > warns) inform (loc, "in template argument for type %qT", type); if (expr == error_mark_node) return NULL_TREE; /* else cxx_constant_value complained but gave us a real constant, so go ahead. */ if (TREE_CODE (expr) != INTEGER_CST) { /* Some assemble time constant expressions like (intptr_t)&&lab1 - (intptr_t)&&lab2 or 4 + (intptr_t)&&var satisfy reduced_constant_expression_p as we can emit them into .rodata initializers of variables, yet they can't fold into an INTEGER_CST at compile time. Refuse them here. */ gcc_checking_assert (reduced_constant_expression_p (expr)); error_at (loc, "template argument %qE for type %qT not " "a constant integer", expr, type); return NULL_TREE; } } else return NULL_TREE; } /* Avoid typedef problems. */ if (TREE_TYPE (expr) != type) expr = fold_convert (type, expr); } /* [temp.arg.nontype]/5, bullet 2 For a non-type template-parameter of type pointer to object, qualification conversions (_conv.qual_) and the array-to-pointer conversion (_conv.array_) are applied. */ else if (TYPE_PTROBV_P (type)) { tree decayed = expr; /* Look through any NOP_EXPRs around an ADDR_EXPR, whether they come from decay_conversion or an explicit cast. If it's a problematic cast, we'll complain about it below. */ if (TREE_CODE (expr) == NOP_EXPR) { tree probe = expr; STRIP_NOPS (probe); if (TREE_CODE (probe) == ADDR_EXPR && TYPE_PTR_P (TREE_TYPE (probe))) { expr = probe; expr_type = TREE_TYPE (expr); } } /* [temp.arg.nontype]/1 (TC1 version, DR 49): A template-argument for a non-type, non-template template-parameter shall be one of: [...] -- the name of a non-type template-parameter; -- the address of an object or function with external linkage, [...] expressed as "& id-expression" where the & is optional if the name refers to a function or array, or if the corresponding template-parameter is a reference. Here, we do not care about functions, as they are invalid anyway for a parameter of type pointer-to-object. */ if (val_dep_p) /* Non-type template parameters are OK. */ ; else if (cxx_dialect >= cxx11 && integer_zerop (expr)) /* Null pointer values are OK in C++11. */; else if (TREE_CODE (expr) != ADDR_EXPR && !INDIRECT_TYPE_P (expr_type)) /* Other values, like integer constants, might be valid non-type arguments of some other type. 
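   (Illustrative assumed example: for

      template <int *P> struct S { };

   an argument such as 3 can reach this point; it cannot bind to a
   pointer-to-object parameter, but it might be a valid argument for a
   parameter of some other type, so we return error_mark_node without
   issuing a diagnostic ourselves and let the caller decide.)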
*/ return error_mark_node; else if (invalid_tparm_referent_p (type, expr, complain)) return NULL_TREE; expr = decayed; expr = perform_qualification_conversions (type, expr); if (expr == error_mark_node) return error_mark_node; } /* [temp.arg.nontype]/5, bullet 3 For a non-type template-parameter of type reference to object, no conversions apply. The type referred to by the reference may be more cv-qualified than the (otherwise identical) type of the template-argument. The template-parameter is bound directly to the template-argument, which must be an lvalue. */ else if (TYPE_REF_OBJ_P (type)) { if (!same_type_ignoring_top_level_qualifiers_p (TREE_TYPE (type), expr_type)) return error_mark_node; if (!at_least_as_qualified_p (TREE_TYPE (type), expr_type)) { if (complain & tf_error) error ("%qE is not a valid template argument for type %qT " "because of conflicts in cv-qualification", expr, type); return NULL_TREE; } if (!lvalue_p (expr)) { if (complain & tf_error) error ("%qE is not a valid template argument for type %qT " "because it is not an lvalue", expr, type); return NULL_TREE; } /* [temp.arg.nontype]/1 A template-argument for a non-type, non-template template-parameter shall be one of: [...] -- the address of an object or function with external linkage. */ if (INDIRECT_REF_P (expr) && TYPE_REF_OBJ_P (TREE_TYPE (TREE_OPERAND (expr, 0)))) { expr = TREE_OPERAND (expr, 0); if (DECL_P (expr)) { if (complain & tf_error) error ("%q#D is not a valid template argument for type %qT " "because a reference variable does not have a constant " "address", expr, type); return NULL_TREE; } } if (TYPE_REF_OBJ_P (TREE_TYPE (expr)) && val_dep_p) /* OK, dependent reference. We don't want to ask whether a DECL is itself value-dependent, since what we want here is its address. */; else { expr = build_address (expr); if (invalid_tparm_referent_p (type, expr, complain)) return NULL_TREE; } if (!same_type_p (type, TREE_TYPE (expr))) expr = build_nop (type, expr); } /* [temp.arg.nontype]/5, bullet 4 For a non-type template-parameter of type pointer to function, only the function-to-pointer conversion (_conv.func_) is applied. If the template-argument represents a set of overloaded functions (or a pointer to such), the matching function is selected from the set (_over.over_). */ else if (TYPE_PTRFN_P (type)) { /* If the argument is a template-id, we might not have enough context information to decay the pointer. */ if (!type_unknown_p (expr_type)) { expr = decay_conversion (expr, complain); if (expr == error_mark_node) return error_mark_node; } if (cxx_dialect >= cxx11 && integer_zerop (expr)) /* Null pointer values are OK in C++11. */ return perform_qualification_conversions (type, expr); expr = convert_nontype_argument_function (type, expr, complain); if (!expr || expr == error_mark_node) return expr; } /* [temp.arg.nontype]/5, bullet 5 For a non-type template-parameter of type reference to function, no conversions apply. If the template-argument represents a set of overloaded functions, the matching function is selected from the set (_over.over_). 
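   For illustration (assumed example):

     template <void (&F)(int)> struct R { };
     void h (int);
     R<h> ok;     // h binds directly to the reference parameter
     R<&h> bad;   // diagnosed below: the argument is a pointer, and no
                  // conversions apply to a reference-to-function parameter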
*/ else if (TYPE_REFFN_P (type)) { if (TREE_CODE (expr) == ADDR_EXPR) { if (complain & tf_error) { error ("%qE is not a valid template argument for type %qT " "because it is a pointer", expr, type); inform (input_location, "try using %qE instead", TREE_OPERAND (expr, 0)); } return NULL_TREE; } expr = convert_nontype_argument_function (type, expr, complain); if (!expr || expr == error_mark_node) return expr; } /* [temp.arg.nontype]/5, bullet 6 For a non-type template-parameter of type pointer to member function, no conversions apply. If the template-argument represents a set of overloaded member functions, the matching member function is selected from the set (_over.over_). */ else if (TYPE_PTRMEMFUNC_P (type)) { expr = instantiate_type (type, expr, tf_none); if (expr == error_mark_node) return error_mark_node; /* [temp.arg.nontype] bullet 1 says the pointer to member expression must be a pointer-to-member constant. */ if (!val_dep_p && !check_valid_ptrmem_cst_expr (type, expr, complain)) return NULL_TREE; /* Repeated conversion can't deal with a conversion that turns PTRMEM_CST into a CONSTRUCTOR, so build up a new PTRMEM_CST instead. */ if (fnptr_conv_p (type, TREE_TYPE (expr))) expr = make_ptrmem_cst (type, PTRMEM_CST_MEMBER (expr)); } /* [temp.arg.nontype]/5, bullet 7 For a non-type template-parameter of type pointer to data member, qualification conversions (_conv.qual_) are applied. */ else if (TYPE_PTRDATAMEM_P (type)) { /* [temp.arg.nontype] bullet 1 says the pointer to member expression must be a pointer-to-member constant. */ if (!val_dep_p && !check_valid_ptrmem_cst_expr (type, expr, complain)) return NULL_TREE; expr = perform_qualification_conversions (type, expr); if (expr == error_mark_node) return expr; } else if (NULLPTR_TYPE_P (type)) { if (!NULLPTR_TYPE_P (TREE_TYPE (expr))) { if (complain & tf_error) error ("%qE is not a valid template argument for type %qT " "because it is of type %qT", expr, type, TREE_TYPE (expr)); return NULL_TREE; } return expr; } else if (CLASS_TYPE_P (type)) { /* Replace the argument with a reference to the corresponding template parameter object. */ if (!val_dep_p) expr = get_template_parm_object (expr, complain); if (expr == error_mark_node) return NULL_TREE; } /* A template non-type parameter must be one of the above. */ else gcc_unreachable (); /* Sanity check: did we actually convert the argument to the right type? */ gcc_assert (same_type_ignoring_top_level_qualifiers_p (type, TREE_TYPE (expr))); return convert_from_reference (expr); } /* Subroutine of coerce_template_template_parms, which returns 1 if PARM_PARM and ARG_PARM match using the rule for the template parameters of template template parameters. Both PARM and ARG are template parameters; the rest of the arguments are the same as for coerce_template_template_parms. */ static int coerce_template_template_parm (tree parm, tree arg, tsubst_flags_t complain, tree in_decl, tree outer_args) { if (arg == NULL_TREE || error_operand_p (arg) || parm == NULL_TREE || error_operand_p (parm)) return 0; if (TREE_CODE (arg) != TREE_CODE (parm)) return 0; switch (TREE_CODE (parm)) { case TEMPLATE_DECL: /* We encounter instantiations of templates like template <template <template <class> class> class TT> class C; */ { tree parmparm = DECL_INNERMOST_TEMPLATE_PARMS (parm); tree argparm = DECL_INNERMOST_TEMPLATE_PARMS (arg); if (!coerce_template_template_parms (parmparm, argparm, complain, in_decl, outer_args)) return 0; } /* Fall through. 
*/ case TYPE_DECL: if (TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (arg)) && !TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (parm))) /* Argument is a parameter pack but parameter is not. */ return 0; break; case PARM_DECL: /* The tsubst call is used to handle cases such as template <int> class C {}; template <class T, template <T> class TT> class D {}; D<int, C> d; i.e. the parameter list of TT depends on earlier parameters. */ if (!uses_template_parms (TREE_TYPE (arg))) { tree t = tsubst (TREE_TYPE (parm), outer_args, complain, in_decl); if (!uses_template_parms (t) && !same_type_p (t, TREE_TYPE (arg))) return 0; } if (TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (arg)) && !TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm))) /* Argument is a parameter pack but parameter is not. */ return 0; break; default: gcc_unreachable (); } return 1; } /* Coerce template argument list ARGLIST for use with template template-parameter TEMPL. */ static tree coerce_template_args_for_ttp (tree templ, tree arglist, tsubst_flags_t complain) { /* Consider an example where a template template parameter declared as template <class T, class U = std::allocator<T> > class TT The template parameter level of T and U are one level larger than of TT. To proper process the default argument of U, say when an instantiation `TT<int>' is seen, we need to build the full arguments containing {int} as the innermost level. Outer levels, available when not appearing as default template argument, can be obtained from the arguments of the enclosing template. Suppose that TT is later substituted with std::vector. The above instantiation is `TT<int, std::allocator<T> >' with TT at level 1, and T at level 2, while the template arguments at level 1 becomes {std::vector} and the inner level 2 is {int}. */ tree outer = DECL_CONTEXT (templ); if (outer) outer = generic_targs_for (outer); else if (current_template_parms) { /* This is an argument of the current template, so we haven't set DECL_CONTEXT yet. */ tree relevant_template_parms; /* Parameter levels that are greater than the level of the given template template parm are irrelevant. */ relevant_template_parms = current_template_parms; while (TMPL_PARMS_DEPTH (relevant_template_parms) != TEMPLATE_TYPE_LEVEL (TREE_TYPE (templ))) relevant_template_parms = TREE_CHAIN (relevant_template_parms); outer = template_parms_to_args (relevant_template_parms); } if (outer) arglist = add_to_template_args (outer, arglist); tree parmlist = DECL_INNERMOST_TEMPLATE_PARMS (templ); return coerce_template_parms (parmlist, arglist, templ, complain, /*require_all_args=*/true, /*use_default_args=*/true); } /* A cache of template template parameters with match-all default arguments. */ static GTY((deletable)) hash_map<tree,tree> *defaulted_ttp_cache; /* T is a bound template template-parameter. Copy its arguments into default arguments of the template template-parameter's template parameters. 
*/ static tree add_defaults_to_ttp (tree otmpl) { if (tree *c = hash_map_safe_get (defaulted_ttp_cache, otmpl)) return *c; tree ntmpl = copy_node (otmpl); tree ntype = copy_node (TREE_TYPE (otmpl)); TYPE_STUB_DECL (ntype) = TYPE_NAME (ntype) = ntmpl; TYPE_MAIN_VARIANT (ntype) = ntype; TYPE_POINTER_TO (ntype) = TYPE_REFERENCE_TO (ntype) = NULL_TREE; TYPE_NAME (ntype) = ntmpl; SET_TYPE_STRUCTURAL_EQUALITY (ntype); tree idx = TEMPLATE_TYPE_PARM_INDEX (ntype) = copy_node (TEMPLATE_TYPE_PARM_INDEX (ntype)); TEMPLATE_PARM_DECL (idx) = ntmpl; TREE_TYPE (ntmpl) = TREE_TYPE (idx) = ntype; tree oparms = DECL_TEMPLATE_PARMS (otmpl); tree parms = DECL_TEMPLATE_PARMS (ntmpl) = copy_node (oparms); TREE_CHAIN (parms) = TREE_CHAIN (oparms); tree vec = TREE_VALUE (parms) = copy_node (TREE_VALUE (parms)); for (int i = 0; i < TREE_VEC_LENGTH (vec); ++i) { tree o = TREE_VEC_ELT (vec, i); if (!template_parameter_pack_p (TREE_VALUE (o))) { tree n = TREE_VEC_ELT (vec, i) = copy_node (o); TREE_PURPOSE (n) = any_targ_node; } } hash_map_safe_put<hm_ggc> (defaulted_ttp_cache, otmpl, ntmpl); return ntmpl; } /* ARG is a bound potential template template-argument, and PARGS is a list of arguments for the corresponding template template-parameter. Adjust PARGS as appropriate for application to ARG's template, and if ARG is a BOUND_TEMPLATE_TEMPLATE_PARM, possibly adjust it to add default template arguments to the template template parameter. */ static tree coerce_ttp_args_for_tta (tree& arg, tree pargs, tsubst_flags_t complain) { ++processing_template_decl; tree arg_tmpl = TYPE_TI_TEMPLATE (arg); if (DECL_TEMPLATE_TEMPLATE_PARM_P (arg_tmpl)) { /* When comparing two template template-parameters in partial ordering, rewrite the one currently being used as an argument to have default arguments for all parameters. */ arg_tmpl = add_defaults_to_ttp (arg_tmpl); pargs = coerce_template_args_for_ttp (arg_tmpl, pargs, complain); if (pargs != error_mark_node) arg = bind_template_template_parm (TREE_TYPE (arg_tmpl), TYPE_TI_ARGS (arg)); } else { tree aparms = INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (arg_tmpl)); pargs = coerce_template_parms (aparms, pargs, arg_tmpl, complain, /*require_all*/true, /*use_default*/true); } --processing_template_decl; return pargs; } /* Subroutine of unify for the case when PARM is a BOUND_TEMPLATE_TEMPLATE_PARM. */ static int unify_bound_ttp_args (tree tparms, tree targs, tree parm, tree& arg, bool explain_p) { tree parmvec = TYPE_TI_ARGS (parm); tree argvec = INNERMOST_TEMPLATE_ARGS (TYPE_TI_ARGS (arg)); /* The template template parm might be variadic and the argument not, so flatten both argument lists. */ parmvec = expand_template_argument_pack (parmvec); argvec = expand_template_argument_pack (argvec); if (flag_new_ttp) { /* In keeping with P0522R0, adjust P's template arguments to apply to A's template; then flatten it again. */ tree nparmvec = coerce_ttp_args_for_tta (arg, parmvec, tf_none); nparmvec = expand_template_argument_pack (nparmvec); if (unify (tparms, targs, nparmvec, argvec, UNIFY_ALLOW_NONE, explain_p)) return 1; /* If the P0522 adjustment eliminated a pack expansion, deduce empty packs. */ if (flag_new_ttp && TREE_VEC_LENGTH (nparmvec) < TREE_VEC_LENGTH (parmvec) && unify_pack_expansion (tparms, targs, parmvec, argvec, DEDUCE_EXACT, /*sub*/true, explain_p)) return 1; } else { /* Deduce arguments T, i from TT<T> or TT<i>. 
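   (Illustrative assumed example:

      template <template <class> class TT, class T> void f (TT<T>);
      template <class U> struct A { };
      void g () { f (A<int> ()); }

   matching the bound parameter TT<T> against A<int> deduces TT = A and
   T = int by unifying the two argument vectors element-wise.)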
We check each element of PARMVEC and ARGVEC individually rather than the whole TREE_VEC since they can have different number of elements, which is allowed under N2555. */ int len = TREE_VEC_LENGTH (parmvec); /* Check if the parameters end in a pack, making them variadic. */ int parm_variadic_p = 0; if (len > 0 && PACK_EXPANSION_P (TREE_VEC_ELT (parmvec, len - 1))) parm_variadic_p = 1; for (int i = 0; i < len - parm_variadic_p; ++i) /* If the template argument list of P contains a pack expansion that is not the last template argument, the entire template argument list is a non-deduced context. */ if (PACK_EXPANSION_P (TREE_VEC_ELT (parmvec, i))) return unify_success (explain_p); if (TREE_VEC_LENGTH (argvec) < len - parm_variadic_p) return unify_too_few_arguments (explain_p, TREE_VEC_LENGTH (argvec), len); for (int i = 0; i < len - parm_variadic_p; ++i) if (unify (tparms, targs, TREE_VEC_ELT (parmvec, i), TREE_VEC_ELT (argvec, i), UNIFY_ALLOW_NONE, explain_p)) return 1; if (parm_variadic_p && unify_pack_expansion (tparms, targs, parmvec, argvec, DEDUCE_EXACT, /*subr=*/true, explain_p)) return 1; } return 0; } /* Return 1 if PARM_PARMS and ARG_PARMS matches using rule for template template parameters. Both PARM_PARMS and ARG_PARMS are vectors of TREE_LIST nodes containing TYPE_DECL, TEMPLATE_DECL or PARM_DECL. Consider the example: template <class T> class A; template<template <class U> class TT> class B; For B<A>, PARM_PARMS are the parameters to TT, while ARG_PARMS are the parameters to A, and OUTER_ARGS contains A. */ static int coerce_template_template_parms (tree parm_parms, tree arg_parms, tsubst_flags_t complain, tree in_decl, tree outer_args) { int nparms, nargs, i; tree parm, arg; int variadic_p = 0; gcc_assert (TREE_CODE (parm_parms) == TREE_VEC); gcc_assert (TREE_CODE (arg_parms) == TREE_VEC); nparms = TREE_VEC_LENGTH (parm_parms); nargs = TREE_VEC_LENGTH (arg_parms); if (flag_new_ttp) { /* P0522R0: A template template-parameter P is at least as specialized as a template template-argument A if, given the following rewrite to two function templates, the function template corresponding to P is at least as specialized as the function template corresponding to A according to the partial ordering rules for function templates ([temp.func.order]). Given an invented class template X with the template parameter list of A (including default arguments): * Each of the two function templates has the same template parameters, respectively, as P or A. * Each function template has a single function parameter whose type is a specialization of X with template arguments corresponding to the template parameters from the respective function template where, for each template parameter PP in the template parameter list of the function template, a corresponding template argument AA is formed. If PP declares a parameter pack, then AA is the pack expansion PP... ([temp.variadic]); otherwise, AA is the id-expression PP. If the rewrite produces an invalid type, then P is not at least as specialized as A. */ /* So coerce P's args to apply to A's parms, and then deduce between A's args and the converted args. 
If that succeeds, A is at least as specialized as P, so they match.*/ tree pargs = template_parms_level_to_args (parm_parms); pargs = add_outermost_template_args (outer_args, pargs); ++processing_template_decl; pargs = coerce_template_parms (arg_parms, pargs, NULL_TREE, tf_none, /*require_all*/true, /*use_default*/true); --processing_template_decl; if (pargs != error_mark_node) { tree targs = make_tree_vec (nargs); tree aargs = template_parms_level_to_args (arg_parms); if (!unify (arg_parms, targs, aargs, pargs, UNIFY_ALLOW_NONE, /*explain*/false)) return 1; } } /* Determine whether we have a parameter pack at the end of the template template parameter's template parameter list. */ if (TREE_VEC_ELT (parm_parms, nparms - 1) != error_mark_node) { parm = TREE_VALUE (TREE_VEC_ELT (parm_parms, nparms - 1)); if (error_operand_p (parm)) return 0; switch (TREE_CODE (parm)) { case TEMPLATE_DECL: case TYPE_DECL: if (TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (parm))) variadic_p = 1; break; case PARM_DECL: if (TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm))) variadic_p = 1; break; default: gcc_unreachable (); } } if (nargs != nparms && !(variadic_p && nargs >= nparms - 1)) return 0; /* Check all of the template parameters except the parameter pack at the end (if any). */ for (i = 0; i < nparms - variadic_p; ++i) { if (TREE_VEC_ELT (parm_parms, i) == error_mark_node || TREE_VEC_ELT (arg_parms, i) == error_mark_node) continue; parm = TREE_VALUE (TREE_VEC_ELT (parm_parms, i)); arg = TREE_VALUE (TREE_VEC_ELT (arg_parms, i)); if (!coerce_template_template_parm (parm, arg, complain, in_decl, outer_args)) return 0; } if (variadic_p) { /* Check each of the template parameters in the template argument against the template parameter pack at the end of the template template parameter. */ if (TREE_VEC_ELT (parm_parms, i) == error_mark_node) return 0; parm = TREE_VALUE (TREE_VEC_ELT (parm_parms, i)); for (; i < nargs; ++i) { if (TREE_VEC_ELT (arg_parms, i) == error_mark_node) continue; arg = TREE_VALUE (TREE_VEC_ELT (arg_parms, i)); if (!coerce_template_template_parm (parm, arg, complain, in_decl, outer_args)) return 0; } } return 1; } /* Verifies that the deduced template arguments (in TARGS) for the template template parameters (in TPARMS) represent valid bindings, by comparing the template parameter list of each template argument to the template parameter list of its corresponding template template parameter, in accordance with DR150. This routine can only be called after all template arguments have been deduced. It will return TRUE if all of the template template parameter bindings are okay, FALSE otherwise. */ bool template_template_parm_bindings_ok_p (tree tparms, tree targs) { int i, ntparms = TREE_VEC_LENGTH (tparms); bool ret = true; /* We're dealing with template parms in this process. */ ++processing_template_decl; targs = INNERMOST_TEMPLATE_ARGS (targs); for (i = 0; i < ntparms; ++i) { tree tparm = TREE_VALUE (TREE_VEC_ELT (tparms, i)); tree targ = TREE_VEC_ELT (targs, i); if (TREE_CODE (tparm) == TEMPLATE_DECL && targ) { tree packed_args = NULL_TREE; int idx, len = 1; if (ARGUMENT_PACK_P (targ)) { /* Look inside the argument pack. */ packed_args = ARGUMENT_PACK_ARGS (targ); len = TREE_VEC_LENGTH (packed_args); } for (idx = 0; idx < len; ++idx) { tree targ_parms = NULL_TREE; if (packed_args) /* Extract the next argument from the argument pack. */ targ = TREE_VEC_ELT (packed_args, idx); if (PACK_EXPANSION_P (targ)) /* Look at the pattern of the pack expansion. 
*/ targ = PACK_EXPANSION_PATTERN (targ); /* Extract the template parameters from the template argument. */ if (TREE_CODE (targ) == TEMPLATE_DECL) targ_parms = DECL_INNERMOST_TEMPLATE_PARMS (targ); else if (TREE_CODE (targ) == TEMPLATE_TEMPLATE_PARM) targ_parms = DECL_INNERMOST_TEMPLATE_PARMS (TYPE_NAME (targ)); /* Verify that we can coerce the template template parameters from the template argument to the template parameter. This requires an exact match. */ if (targ_parms && !coerce_template_template_parms (DECL_INNERMOST_TEMPLATE_PARMS (tparm), targ_parms, tf_none, tparm, targs)) { ret = false; goto out; } } } } out: --processing_template_decl; return ret; } /* Since type attributes aren't mangled, we need to strip them from template type arguments. */ tree canonicalize_type_argument (tree arg, tsubst_flags_t complain) { if (!arg || arg == error_mark_node || arg == TYPE_CANONICAL (arg)) return arg; bool removed_attributes = false; tree canon = strip_typedefs (arg, &removed_attributes); if (removed_attributes && (complain & tf_warning)) warning (OPT_Wignored_attributes, "ignoring attributes on template argument %qT", arg); return canon; } /* And from inside dependent non-type arguments like sizeof(Type). */ static tree canonicalize_expr_argument (tree arg, tsubst_flags_t complain) { if (!arg || arg == error_mark_node) return arg; bool removed_attributes = false; tree canon = strip_typedefs_expr (arg, &removed_attributes); if (removed_attributes && (complain & tf_warning)) warning (OPT_Wignored_attributes, "ignoring attributes in template argument %qE", arg); return canon; } /* A template declaration can be substituted for a constrained template template parameter only when the argument is no more constrained than the parameter. */ static bool is_compatible_template_arg (tree parm, tree arg) { tree parm_cons = get_constraints (parm); /* For now, allow constrained template template arguments and unconstrained template template parameters. */ if (parm_cons == NULL_TREE) return true; /* If the template parameter is constrained, we need to rewrite its constraints in terms of the ARG's template parameters. This ensures that all of the template parameter types will have the same depth. Note that this is only valid when coerce_template_template_parm is true for the innermost template parameters of PARM and ARG. In other words, because coercion is successful, this conversion will be valid. */ tree new_args = NULL_TREE; if (parm_cons) { tree aparms = DECL_INNERMOST_TEMPLATE_PARMS (arg); new_args = template_parms_level_to_args (aparms); parm_cons = tsubst_constraint_info (parm_cons, new_args, tf_none, NULL_TREE); if (parm_cons == error_mark_node) return false; } return weakly_subsumes (parm_cons, new_args, arg); } // Convert a placeholder argument into a binding to the original // parameter. The original parameter is saved as the TREE_TYPE of // ARG. static inline tree convert_wildcard_argument (tree parm, tree arg) { TREE_TYPE (arg) = parm; return arg; } /* We can't fully resolve ARG given as a non-type template argument to TYPE, because one of them is dependent. But we need to represent the conversion for the benefit of cp_tree_equal. */ static tree maybe_convert_nontype_argument (tree type, tree arg) { /* Auto parms get no conversion. */ if (type_uses_auto (type)) return arg; /* We don't need or want to add this conversion now if we're going to use the argument for deduction. 
*/ if (value_dependent_expression_p (arg)) return arg; type = cv_unqualified (type); tree argtype = TREE_TYPE (arg); if (same_type_p (type, argtype)) return arg; arg = build1 (IMPLICIT_CONV_EXPR, type, arg); IMPLICIT_CONV_EXPR_NONTYPE_ARG (arg) = true; return arg; } /* Convert the indicated template ARG as necessary to match the indicated template PARM. Returns the converted ARG, or error_mark_node if the conversion was unsuccessful. Error and warning messages are issued under control of COMPLAIN. This conversion is for the Ith parameter in the parameter list. ARGS is the full set of template arguments deduced so far. */ static tree convert_template_argument (tree parm, tree arg, tree args, tsubst_flags_t complain, int i, tree in_decl) { tree orig_arg; tree val; int is_type, requires_type, is_tmpl_type, requires_tmpl_type; if (parm == error_mark_node || error_operand_p (arg)) return error_mark_node; /* Trivially convert placeholders. */ if (TREE_CODE (arg) == WILDCARD_DECL) return convert_wildcard_argument (parm, arg); if (arg == any_targ_node) return arg; if (TREE_CODE (arg) == TREE_LIST && TREE_CODE (TREE_VALUE (arg)) == OFFSET_REF) { /* The template argument was the name of some member function. That's usually invalid, but static members are OK. In any case, grab the underlying fields/functions and issue an error later if required. */ TREE_TYPE (arg) = unknown_type_node; } orig_arg = arg; requires_tmpl_type = TREE_CODE (parm) == TEMPLATE_DECL; requires_type = (TREE_CODE (parm) == TYPE_DECL || requires_tmpl_type); /* When determining whether an argument pack expansion is a template, look at the pattern. */ if (PACK_EXPANSION_P (arg)) arg = PACK_EXPANSION_PATTERN (arg); /* Deal with an injected-class-name used as a template template arg. */ if (requires_tmpl_type && CLASS_TYPE_P (arg)) { tree t = maybe_get_template_decl_from_type_decl (TYPE_NAME (arg)); if (TREE_CODE (t) == TEMPLATE_DECL) { if (cxx_dialect >= cxx11) /* OK under DR 1004. */; else if (complain & tf_warning_or_error) pedwarn (input_location, OPT_Wpedantic, "injected-class-name %qD" " used as template template argument", TYPE_NAME (arg)); else if (flag_pedantic_errors) t = arg; arg = t; } } is_tmpl_type = ((TREE_CODE (arg) == TEMPLATE_DECL && TREE_CODE (DECL_TEMPLATE_RESULT (arg)) == TYPE_DECL) || (requires_tmpl_type && TREE_CODE (arg) == TYPE_ARGUMENT_PACK) || TREE_CODE (arg) == TEMPLATE_TEMPLATE_PARM || TREE_CODE (arg) == UNBOUND_CLASS_TEMPLATE); if (is_tmpl_type && (TREE_CODE (arg) == TEMPLATE_TEMPLATE_PARM || TREE_CODE (arg) == UNBOUND_CLASS_TEMPLATE)) arg = TYPE_STUB_DECL (arg); is_type = TYPE_P (arg) || is_tmpl_type; if (requires_type && ! is_type && TREE_CODE (arg) == SCOPE_REF && TREE_CODE (TREE_OPERAND (arg, 0)) == TEMPLATE_TYPE_PARM) { if (TREE_CODE (TREE_OPERAND (arg, 1)) == BIT_NOT_EXPR) { if (complain & tf_error) error ("invalid use of destructor %qE as a type", orig_arg); return error_mark_node; } permerror (input_location, "to refer to a type member of a template parameter, " "use %<typename %E%>", orig_arg); orig_arg = make_typename_type (TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1), typename_type, complain); arg = orig_arg; is_type = 1; } if (is_type != requires_type) { if (in_decl) { if (complain & tf_error) { error ("type/value mismatch at argument %d in template " "parameter list for %qD", i + 1, in_decl); if (is_type) { /* The template argument is a type, but we're expecting an expression. */ inform (input_location, " expected a constant of type %qT, got %qT", TREE_TYPE (parm), (DECL_P (arg) ? 
DECL_NAME (arg) : orig_arg)); /* [temp.arg]/2: "In a template-argument, an ambiguity between a type-id and an expression is resolved to a type-id, regardless of the form of the corresponding template-parameter." So give the user a clue. */ if (TREE_CODE (arg) == FUNCTION_TYPE) inform (input_location, " ambiguous template argument " "for non-type template parameter is treated as " "function type"); } else if (requires_tmpl_type) inform (input_location, " expected a class template, got %qE", orig_arg); else inform (input_location, " expected a type, got %qE", orig_arg); } } return error_mark_node; } if (is_tmpl_type ^ requires_tmpl_type) { if (in_decl && (complain & tf_error)) { error ("type/value mismatch at argument %d in template " "parameter list for %qD", i + 1, in_decl); if (is_tmpl_type) inform (input_location, " expected a type, got %qT", DECL_NAME (arg)); else inform (input_location, " expected a class template, got %qT", orig_arg); } return error_mark_node; } if (template_parameter_pack_p (parm) && ARGUMENT_PACK_P (orig_arg)) /* We already did the appropriate conversion when packing args. */ val = orig_arg; else if (is_type) { if (requires_tmpl_type) { if (TREE_CODE (TREE_TYPE (arg)) == UNBOUND_CLASS_TEMPLATE) /* The number of argument required is not known yet. Just accept it for now. */ val = orig_arg; else { tree parmparm = DECL_INNERMOST_TEMPLATE_PARMS (parm); tree argparm; /* Strip alias templates that are equivalent to another template. */ arg = get_underlying_template (arg); argparm = DECL_INNERMOST_TEMPLATE_PARMS (arg); if (coerce_template_template_parms (parmparm, argparm, complain, in_decl, args)) { val = arg; /* TEMPLATE_TEMPLATE_PARM node is preferred over TEMPLATE_DECL. */ if (val != error_mark_node) { if (DECL_TEMPLATE_TEMPLATE_PARM_P (val)) val = TREE_TYPE (val); if (TREE_CODE (orig_arg) == TYPE_PACK_EXPANSION) val = make_pack_expansion (val, complain); } } else { if (in_decl && (complain & tf_error)) { error ("type/value mismatch at argument %d in " "template parameter list for %qD", i + 1, in_decl); inform (input_location, " expected a template of type %qD, got %qT", parm, orig_arg); } val = error_mark_node; } // Check that the constraints are compatible before allowing the // substitution. if (val != error_mark_node) if (!is_compatible_template_arg (parm, arg)) { if (in_decl && (complain & tf_error)) { error ("constraint mismatch at argument %d in " "template parameter list for %qD", i + 1, in_decl); inform (input_location, " expected %qD but got %qD", parm, arg); } val = error_mark_node; } } } else val = orig_arg; /* We only form one instance of each template specialization. Therefore, if we use a non-canonical variant (i.e., a typedef), any future messages referring to the type will use the typedef, which is confusing if those future uses do not themselves also use the typedef. */ if (TYPE_P (val)) val = canonicalize_type_argument (val, complain); } else { tree t = TREE_TYPE (parm); if (TEMPLATE_PARM_LEVEL (get_template_parm_index (parm)) > TMPL_ARGS_DEPTH (args)) /* We don't have enough levels of args to do any substitution. This can happen in the context of -fnew-ttp-matching. 
*/; else if (tree a = type_uses_auto (t)) { t = do_auto_deduction (t, arg, a, complain, adc_unify, args); if (t == error_mark_node) return error_mark_node; } else t = tsubst (t, args, complain, in_decl); if (invalid_nontype_parm_type_p (t, complain)) return error_mark_node; if (t != TREE_TYPE (parm)) t = canonicalize_type_argument (t, complain); if (!type_dependent_expression_p (orig_arg) && !uses_template_parms (t)) /* We used to call digest_init here. However, digest_init will report errors, which we don't want when complain is zero. More importantly, digest_init will try too hard to convert things: for example, `0' should not be converted to pointer type at this point according to the standard. Accepting this is not merely an extension, since deciding whether or not these conversions can occur is part of determining which function template to call, or whether a given explicit argument specification is valid. */ val = convert_nontype_argument (t, orig_arg, complain); else { val = canonicalize_expr_argument (orig_arg, complain); val = maybe_convert_nontype_argument (t, val); } if (val == NULL_TREE) val = error_mark_node; else if (val == error_mark_node && (complain & tf_error)) error_at (cp_expr_loc_or_input_loc (orig_arg), "could not convert template argument %qE from %qT to %qT", orig_arg, TREE_TYPE (orig_arg), t); if (INDIRECT_REF_P (val)) { /* Reject template arguments that are references to built-in functions with no library fallbacks. */ const_tree inner = TREE_OPERAND (val, 0); const_tree innertype = TREE_TYPE (inner); if (innertype && TYPE_REF_P (innertype) && TREE_CODE (TREE_TYPE (innertype)) == FUNCTION_TYPE && TREE_OPERAND_LENGTH (inner) > 0 && reject_gcc_builtin (TREE_OPERAND (inner, 0))) return error_mark_node; } if (TREE_CODE (val) == SCOPE_REF) { /* Strip typedefs from the SCOPE_REF. */ tree type = canonicalize_type_argument (TREE_TYPE (val), complain); tree scope = canonicalize_type_argument (TREE_OPERAND (val, 0), complain); val = build_qualified_name (type, scope, TREE_OPERAND (val, 1), QUALIFIED_NAME_IS_TEMPLATE (val)); } } return val; } /* Coerces the remaining template arguments in INNER_ARGS (from ARG_IDX to the end) into the parameter pack at PARM_IDX in PARMS. Returns the coerced argument pack. PARM_IDX is the position of this parameter in the template parameter list. ARGS is the original template argument list. */ static tree coerce_template_parameter_pack (tree parms, int parm_idx, tree args, tree inner_args, int arg_idx, tree new_args, int* lost, tree in_decl, tsubst_flags_t complain) { tree parm = TREE_VEC_ELT (parms, parm_idx); int nargs = inner_args ? NUM_TMPL_ARGS (inner_args) : 0; tree packed_args; tree argument_pack; tree packed_parms = NULL_TREE; if (arg_idx > nargs) arg_idx = nargs; if (tree packs = fixed_parameter_pack_p (TREE_VALUE (parm))) { /* When the template parameter is a non-type template parameter pack or template template parameter pack whose type or template parameters use parameter packs, we know exactly how many arguments we are looking for. Build a vector of the instantiated decls for these template parameters in PACKED_PARMS. */ /* We can't use make_pack_expansion here because it would interpret a _DECL as a use rather than a declaration. 
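A rough illustration (hypothetical declarations): given template <class... Ts> struct A { template <Ts... Vs> struct B; }; once Ts is known to be, say, int and char, the pack Vs is fixed at two elements, and PACKED_PARMS below ends up holding the two instantiated parameters, one of type int and one of type char, against which the supplied arguments are checked.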
*/ tree decl = TREE_VALUE (parm); tree exp = cxx_make_type (TYPE_PACK_EXPANSION); SET_PACK_EXPANSION_PATTERN (exp, decl); PACK_EXPANSION_PARAMETER_PACKS (exp) = packs; SET_TYPE_STRUCTURAL_EQUALITY (exp); TREE_VEC_LENGTH (args)--; packed_parms = tsubst_pack_expansion (exp, args, complain, decl); TREE_VEC_LENGTH (args)++; if (packed_parms == error_mark_node) return error_mark_node; /* If we're doing a partial instantiation of a member template, verify that all of the types used for the non-type template parameter pack are, in fact, valid for non-type template parameters. */ if (arg_idx < nargs && PACK_EXPANSION_P (TREE_VEC_ELT (inner_args, arg_idx))) { int j, len = TREE_VEC_LENGTH (packed_parms); for (j = 0; j < len; ++j) { tree t = TREE_VEC_ELT (packed_parms, j); if (TREE_CODE (t) == PARM_DECL && invalid_nontype_parm_type_p (TREE_TYPE (t), complain)) return error_mark_node; } /* We don't know how many args we have yet, just use the unconverted ones for now. */ return NULL_TREE; } packed_args = make_tree_vec (TREE_VEC_LENGTH (packed_parms)); } /* Check if we have a placeholder pack, which indicates we're in the context of a introduction list. In that case we want to match this pack to the single placeholder. */ else if (arg_idx < nargs && TREE_CODE (TREE_VEC_ELT (inner_args, arg_idx)) == WILDCARD_DECL && WILDCARD_PACK_P (TREE_VEC_ELT (inner_args, arg_idx))) { nargs = arg_idx + 1; packed_args = make_tree_vec (1); } else packed_args = make_tree_vec (nargs - arg_idx); /* Convert the remaining arguments, which will be a part of the parameter pack "parm". */ int first_pack_arg = arg_idx; for (; arg_idx < nargs; ++arg_idx) { tree arg = TREE_VEC_ELT (inner_args, arg_idx); tree actual_parm = TREE_VALUE (parm); int pack_idx = arg_idx - first_pack_arg; if (packed_parms) { /* Once we've packed as many args as we have types, stop. */ if (pack_idx >= TREE_VEC_LENGTH (packed_parms)) break; else if (PACK_EXPANSION_P (arg)) /* We don't know how many args we have yet, just use the unconverted ones for now. */ return NULL_TREE; else actual_parm = TREE_VEC_ELT (packed_parms, pack_idx); } if (arg == error_mark_node) { if (complain & tf_error) error ("template argument %d is invalid", arg_idx + 1); } else arg = convert_template_argument (actual_parm, arg, new_args, complain, parm_idx, in_decl); if (arg == error_mark_node) (*lost)++; TREE_VEC_ELT (packed_args, pack_idx) = arg; } if (arg_idx - first_pack_arg < TREE_VEC_LENGTH (packed_args) && TREE_VEC_LENGTH (packed_args) > 0) { if (complain & tf_error) error ("wrong number of template arguments (%d, should be %d)", arg_idx - first_pack_arg, TREE_VEC_LENGTH (packed_args)); return error_mark_node; } if (TREE_CODE (TREE_VALUE (parm)) == TYPE_DECL || TREE_CODE (TREE_VALUE (parm)) == TEMPLATE_DECL) argument_pack = cxx_make_type (TYPE_ARGUMENT_PACK); else { argument_pack = make_node (NONTYPE_ARGUMENT_PACK); TREE_CONSTANT (argument_pack) = 1; } SET_ARGUMENT_PACK_ARGS (argument_pack, packed_args); if (CHECKING_P) SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (packed_args, TREE_VEC_LENGTH (packed_args)); return argument_pack; } /* Returns the number of pack expansions in the template argument vector ARGS. */ static int pack_expansion_args_count (tree args) { int i; int count = 0; if (args) for (i = 0; i < TREE_VEC_LENGTH (args); ++i) { tree elt = TREE_VEC_ELT (args, i); if (elt && PACK_EXPANSION_P (elt)) ++count; } return count; } /* Convert all template arguments to their appropriate types, and return a vector containing the innermost resulting template arguments. 
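For instance (an illustrative use), coercing the arguments of B<'x', 3> against template <char C, long N> struct B yields the two-element vector {'x', 3L}, the second argument having been converted to the parameter type long.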
If any error occurs, return error_mark_node. Error and warning messages are issued under control of COMPLAIN. If REQUIRE_ALL_ARGS is false, argument deduction will be performed for arguments not specified in ARGS. Otherwise, if USE_DEFAULT_ARGS is true, default arguments will be used to fill in unspecified arguments. If REQUIRE_ALL_ARGS is true, but USE_DEFAULT_ARGS is false, then all arguments must be specified in ARGS. */ static tree coerce_template_parms (tree parms, tree args, tree in_decl, tsubst_flags_t complain, bool require_all_args, bool use_default_args) { int nparms, nargs, parm_idx, arg_idx, lost = 0; tree orig_inner_args; tree inner_args; tree new_args; tree new_inner_args; /* When used as a boolean value, indicates whether this is a variadic template parameter list. Since it's an int, we can also subtract it from nparms to get the number of non-variadic parameters. */ int variadic_p = 0; int variadic_args_p = 0; int post_variadic_parms = 0; /* Adjustment to nparms for fixed parameter packs. */ int fixed_pack_adjust = 0; int fixed_packs = 0; int missing = 0; /* Likewise for parameters with default arguments. */ int default_p = 0; if (args == error_mark_node) return error_mark_node; nparms = TREE_VEC_LENGTH (parms); /* Determine if there are any parameter packs or default arguments. */ for (parm_idx = 0; parm_idx < nparms; ++parm_idx) { tree parm = TREE_VEC_ELT (parms, parm_idx); if (variadic_p) ++post_variadic_parms; if (template_parameter_pack_p (TREE_VALUE (parm))) ++variadic_p; if (TREE_PURPOSE (parm)) ++default_p; } inner_args = orig_inner_args = INNERMOST_TEMPLATE_ARGS (args); /* If there are no parameters that follow a parameter pack, we need to expand any argument packs so that we can deduce a parameter pack from some non-packed args followed by an argument pack, as in variadic85.C. If there are such parameters, we need to leave argument packs intact so the arguments are assigned properly. This can happen when dealing with a nested class inside a partial specialization of a class template, as in variadic92.C, or when deducing a template parameter pack from a sub-declarator, as in variadic114.C. */ if (!post_variadic_parms) inner_args = expand_template_argument_pack (inner_args); /* Count any pack expansion args. */ variadic_args_p = pack_expansion_args_count (inner_args); nargs = inner_args ? NUM_TMPL_ARGS (inner_args) : 0; if ((nargs - variadic_args_p > nparms && !variadic_p) || (nargs < nparms - variadic_p && require_all_args && !variadic_args_p && (!use_default_args || (TREE_VEC_ELT (parms, nargs) != error_mark_node && !TREE_PURPOSE (TREE_VEC_ELT (parms, nargs)))))) { bad_nargs: if (complain & tf_error) { if (variadic_p || default_p) { nparms -= variadic_p + default_p; error ("wrong number of template arguments " "(%d, should be at least %d)", nargs, nparms); } else error ("wrong number of template arguments " "(%d, should be %d)", nargs, nparms); if (in_decl) inform (DECL_SOURCE_LOCATION (in_decl), "provided for %qD", in_decl); } return error_mark_node; } /* We can't pass a pack expansion to a non-pack parameter of an alias template (DR 1430). 
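An illustrative (hypothetical) case: with template <class T> using id = T; a use such as id<Ts...> inside a variadic template tries to expand the pack Ts into the single non-pack parameter T of the alias template, which is what the diagnostic below rejects.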
*/ else if (in_decl && (DECL_ALIAS_TEMPLATE_P (in_decl) || concept_definition_p (in_decl)) && variadic_args_p && nargs - variadic_args_p < nparms - variadic_p) { if (complain & tf_error) { for (int i = 0; i < TREE_VEC_LENGTH (inner_args); ++i) { tree arg = TREE_VEC_ELT (inner_args, i); tree parm = TREE_VALUE (TREE_VEC_ELT (parms, i)); if (PACK_EXPANSION_P (arg) && !template_parameter_pack_p (parm)) { if (DECL_ALIAS_TEMPLATE_P (in_decl)) error_at (location_of (arg), "pack expansion argument for non-pack parameter " "%qD of alias template %qD", parm, in_decl); else error_at (location_of (arg), "pack expansion argument for non-pack parameter " "%qD of concept %qD", parm, in_decl); inform (DECL_SOURCE_LOCATION (parm), "declared here"); goto found; } } gcc_unreachable (); found:; } return error_mark_node; } /* We need to evaluate the template arguments, even though this template-id may be nested within a "sizeof". */ cp_evaluated ev; new_inner_args = make_tree_vec (nparms); new_args = add_outermost_template_args (args, new_inner_args); int pack_adjust = 0; for (parm_idx = 0, arg_idx = 0; parm_idx < nparms; parm_idx++, arg_idx++) { tree arg; tree parm; /* Get the Ith template parameter. */ parm = TREE_VEC_ELT (parms, parm_idx); if (parm == error_mark_node) { TREE_VEC_ELT (new_inner_args, arg_idx) = error_mark_node; continue; } /* Calculate the next argument. */ if (arg_idx < nargs) arg = TREE_VEC_ELT (inner_args, arg_idx); else arg = NULL_TREE; if (template_parameter_pack_p (TREE_VALUE (parm)) && (arg || require_all_args || !(complain & tf_partial)) && !(arg && ARGUMENT_PACK_P (arg))) { /* Some arguments will be placed in the template parameter pack PARM. */ arg = coerce_template_parameter_pack (parms, parm_idx, args, inner_args, arg_idx, new_args, &lost, in_decl, complain); if (arg == NULL_TREE) { /* We don't know how many args we have yet, just use the unconverted (and still packed) ones for now. */ new_inner_args = orig_inner_args; arg_idx = nargs; break; } TREE_VEC_ELT (new_inner_args, parm_idx) = arg; /* Store this argument. */ if (arg == error_mark_node) { lost++; /* We are done with all of the arguments. */ arg_idx = nargs; break; } else { pack_adjust = TREE_VEC_LENGTH (ARGUMENT_PACK_ARGS (arg)) - 1; arg_idx += pack_adjust; if (fixed_parameter_pack_p (TREE_VALUE (parm))) { ++fixed_packs; fixed_pack_adjust += pack_adjust; } } continue; } else if (arg) { if (PACK_EXPANSION_P (arg)) { /* "If every valid specialization of a variadic template requires an empty template parameter pack, the template is ill-formed, no diagnostic required." So check that the pattern works with this parameter. */ tree pattern = PACK_EXPANSION_PATTERN (arg); tree conv = convert_template_argument (TREE_VALUE (parm), pattern, new_args, complain, parm_idx, in_decl); if (conv == error_mark_node) { if (complain & tf_error) inform (input_location, "so any instantiation with a " "non-empty parameter pack would be ill-formed"); ++lost; } else if (TYPE_P (conv) && !TYPE_P (pattern)) /* Recover from missing typename. */ TREE_VEC_ELT (inner_args, arg_idx) = make_pack_expansion (conv, complain); /* We don't know how many args we have yet, just use the unconverted ones for now. */ new_inner_args = inner_args; arg_idx = nargs; break; } } else if (require_all_args) { /* There must be a default arg in this case. */ arg = tsubst_template_arg (TREE_PURPOSE (parm), new_args, complain, in_decl); /* The position of the first default template argument, is also the number of non-defaulted arguments in NEW_INNER_ARGS. Record that. 
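For example (illustrative declarations), for template <class A, class B = int, class C = long> struct S used as S<char>, the first default is substituted at index 1, so NON_DEFAULT_TEMPLATE_ARGS_COUNT is set to 1 for NEW_INNER_ARGS.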
*/ if (!NON_DEFAULT_TEMPLATE_ARGS_COUNT (new_inner_args)) SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (new_inner_args, arg_idx - pack_adjust); } else break; if (arg == error_mark_node) { if (complain & tf_error) error ("template argument %d is invalid", arg_idx + 1); } else if (!arg) { /* This can occur if there was an error in the template parameter list itself (which we would already have reported) that we are trying to recover from, e.g., a class template with a parameter list such as template<typename..., typename> (cpp0x/variadic150.C). */ ++lost; /* This can also happen with a fixed parameter pack (71834). */ if (arg_idx >= nargs) ++missing; } else arg = convert_template_argument (TREE_VALUE (parm), arg, new_args, complain, parm_idx, in_decl); if (arg == error_mark_node) lost++; TREE_VEC_ELT (new_inner_args, arg_idx - pack_adjust) = arg; } if (missing || arg_idx < nargs - variadic_args_p) { /* If we had fixed parameter packs, we didn't know how many arguments we actually needed earlier; now we do. */ nparms += fixed_pack_adjust; variadic_p -= fixed_packs; goto bad_nargs; } if (arg_idx < nargs) { /* We had some pack expansion arguments that will only work if the packs are empty, but wait until instantiation time to complain. See variadic-ttp3.C. */ /* Except that we can't provide empty packs to alias templates or concepts when there are no corresponding parameters. Basically, we can get here with this: template<typename T> concept C = true; template<typename... Args> requires C<Args...> void f(); When parsing C<Args...>, we try to form a concept check of C<?, Args...>. Without the extra check for substituting an empty pack past the last parameter, we can accept the check as valid. FIXME: This may be valid for alias templates (but I doubt it). FIXME: The error could be better also. */ if (in_decl && concept_definition_p (in_decl)) { if (complain & tf_error) error_at (location_of (TREE_VEC_ELT (args, arg_idx)), "too many arguments"); return error_mark_node; } int len = nparms + (nargs - arg_idx); tree args = make_tree_vec (len); int i = 0; for (; i < nparms; ++i) TREE_VEC_ELT (args, i) = TREE_VEC_ELT (new_inner_args, i); for (; i < len; ++i, ++arg_idx) TREE_VEC_ELT (args, i) = TREE_VEC_ELT (inner_args, arg_idx - pack_adjust); new_inner_args = args; } if (lost) { gcc_assert (!(complain & tf_error) || seen_error ()); return error_mark_node; } if (CHECKING_P && !NON_DEFAULT_TEMPLATE_ARGS_COUNT (new_inner_args)) SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (new_inner_args, TREE_VEC_LENGTH (new_inner_args)); return new_inner_args; } /* Convert all template arguments to their appropriate types, and return a vector containing the innermost resulting template arguments. If any error occurs, return error_mark_node. Error and warning messages are not issued. Note that no function argument deduction is performed, and default arguments are used to fill in unspecified arguments. */ tree coerce_template_parms (tree parms, tree args, tree in_decl) { return coerce_template_parms (parms, args, in_decl, tf_none, true, true); } /* Convert all template arguments to their appropriate type, and instantiate default arguments as needed. This returns a vector containing the innermost resulting template arguments, or error_mark_node if unsuccessful. */ tree coerce_template_parms (tree parms, tree args, tree in_decl, tsubst_flags_t complain) { return coerce_template_parms (parms, args, in_decl, complain, true, true); } /* Like coerce_template_parms. 
If PARMS represents all template parameters levels, this function returns a vector of vectors representing all the resulting argument levels. Note that in this case, only the innermost arguments are coerced because the outermost ones are supposed to have been coerced already. Otherwise, if PARMS represents only (the innermost) vector of parameters, this function returns a vector containing just the innermost resulting arguments. */ static tree coerce_innermost_template_parms (tree parms, tree args, tree in_decl, tsubst_flags_t complain, bool require_all_args, bool use_default_args) { int parms_depth = TMPL_PARMS_DEPTH (parms); int args_depth = TMPL_ARGS_DEPTH (args); tree coerced_args; if (parms_depth > 1) { coerced_args = make_tree_vec (parms_depth); tree level; int cur_depth; for (level = parms, cur_depth = parms_depth; parms_depth > 0 && level != NULL_TREE; level = TREE_CHAIN (level), --cur_depth) { tree l; if (cur_depth == args_depth) l = coerce_template_parms (TREE_VALUE (level), args, in_decl, complain, require_all_args, use_default_args); else l = TMPL_ARGS_LEVEL (args, cur_depth); if (l == error_mark_node) return error_mark_node; SET_TMPL_ARGS_LEVEL (coerced_args, cur_depth, l); } } else coerced_args = coerce_template_parms (INNERMOST_TEMPLATE_PARMS (parms), args, in_decl, complain, require_all_args, use_default_args); return coerced_args; } /* Returns true if T is a wrapper to make a C++20 template parameter object const. */ static bool class_nttp_const_wrapper_p (tree t) { if (cxx_dialect < cxx2a) return false; return (TREE_CODE (t) == VIEW_CONVERT_EXPR && CP_TYPE_CONST_P (TREE_TYPE (t)) && TREE_CODE (TREE_OPERAND (t, 0)) == TEMPLATE_PARM_INDEX); } /* Returns 1 if template args OT and NT are equivalent. */ int template_args_equal (tree ot, tree nt, bool partial_order /* = false */) { if (nt == ot) return 1; if (nt == NULL_TREE || ot == NULL_TREE) return false; if (nt == any_targ_node || ot == any_targ_node) return true; if (class_nttp_const_wrapper_p (nt)) nt = TREE_OPERAND (nt, 0); if (class_nttp_const_wrapper_p (ot)) ot = TREE_OPERAND (ot, 0); if (TREE_CODE (nt) == TREE_VEC) /* For member templates */ return TREE_CODE (ot) == TREE_VEC && comp_template_args (ot, nt); else if (PACK_EXPANSION_P (ot)) return (PACK_EXPANSION_P (nt) && template_args_equal (PACK_EXPANSION_PATTERN (ot), PACK_EXPANSION_PATTERN (nt)) && template_args_equal (PACK_EXPANSION_EXTRA_ARGS (ot), PACK_EXPANSION_EXTRA_ARGS (nt))); else if (ARGUMENT_PACK_P (ot) || ARGUMENT_PACK_P (nt)) return cp_tree_equal (ot, nt); else if (ot && TREE_CODE (ot) == ARGUMENT_PACK_SELECT) gcc_unreachable (); else if (TYPE_P (nt)) { if (!TYPE_P (ot)) return false; /* Don't treat an alias template specialization with dependent arguments as equivalent to its underlying type when used as a template argument; we need them to be distinct so that we substitute into the specialization arguments at instantiation time. And aliases can't be equivalent without being ==, so we don't need to look any deeper. During partial ordering, however, we need to treat them normally so that we can order uses of the same alias with different cv-qualification (79960). */ if (!partial_order && (TYPE_ALIAS_P (nt) || TYPE_ALIAS_P (ot))) return false; else return same_type_p (ot, nt); } else if (TREE_CODE (ot) == TREE_VEC || TYPE_P (ot)) return 0; else { /* Try to treat a template non-type argument that has been converted to the parameter type as equivalent to one that hasn't yet. 
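For instance, the same argument may be seen once in its original form and once behind the implicit conversion added when it was converted to the parameter type; peeling CONVERT_EXPR and NON_LVALUE_EXPR wrappers from both OT and NT below lets cp_tree_equal treat the two spellings as the same argument.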
*/ for (enum tree_code code1 = TREE_CODE (ot); CONVERT_EXPR_CODE_P (code1) || code1 == NON_LVALUE_EXPR; code1 = TREE_CODE (ot)) ot = TREE_OPERAND (ot, 0); for (enum tree_code code2 = TREE_CODE (nt); CONVERT_EXPR_CODE_P (code2) || code2 == NON_LVALUE_EXPR; code2 = TREE_CODE (nt)) nt = TREE_OPERAND (nt, 0); return cp_tree_equal (ot, nt); } } /* Returns 1 iff the OLDARGS and NEWARGS are in fact identical sets of template arguments. Returns 0 otherwise, and updates OLDARG_PTR and NEWARG_PTR with the offending arguments if they are non-NULL. */ int comp_template_args (tree oldargs, tree newargs, tree *oldarg_ptr, tree *newarg_ptr, bool partial_order) { int i; if (oldargs == newargs) return 1; if (!oldargs || !newargs) return 0; if (TREE_VEC_LENGTH (oldargs) != TREE_VEC_LENGTH (newargs)) return 0; for (i = 0; i < TREE_VEC_LENGTH (oldargs); ++i) { tree nt = TREE_VEC_ELT (newargs, i); tree ot = TREE_VEC_ELT (oldargs, i); if (! template_args_equal (ot, nt, partial_order)) { if (oldarg_ptr != NULL) *oldarg_ptr = ot; if (newarg_ptr != NULL) *newarg_ptr = nt; return 0; } } return 1; } inline bool comp_template_args_porder (tree oargs, tree nargs) { return comp_template_args (oargs, nargs, NULL, NULL, true); } /* Implement a freelist interface for objects of type T. Head is a separate object, rather than a regular member, so that we can define it as a GTY deletable pointer, which is highly desirable. A data member could be declared that way, but then the containing object would implicitly get GTY((user)), which would prevent us from instantiating freelists as global objects. Although this way we can create freelist global objects, they're such thin wrappers that instantiating temporaries at every use loses nothing and saves permanent storage for the freelist object. Member functions next, anew, poison and reinit have default implementations that work for most of the types we're interested in, but if they don't work for some type, they should be explicitly specialized. See the comments before them for requirements, and the example specializations for the tree_list_freelist. */ template <typename T> class freelist { /* Return the next object in a chain. We could just do type punning, but if we access the object with its underlying type, we avoid strict-aliasing trouble. This needs only work between poison and reinit. */ static T *&next (T *obj) { return obj->next; } /* Return a newly allocated, uninitialized or minimally-initialized object of type T. Any initialization performed by anew should either remain across the life of the object and the execution of poison, or be redone by reinit. */ static T *anew () { return ggc_alloc<T> (); } /* Optionally scribble all over the bits holding the object, so that they become (mostly?) uninitialized memory. This is called while preparing to make the object part of the free list. */ static void poison (T *obj) { T *p ATTRIBUTE_UNUSED = obj; T **q ATTRIBUTE_UNUSED = &next (obj); #ifdef ENABLE_GC_CHECKING /* Poison the data, to indicate the data is garbage. */ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, sizeof (*p))); memset (p, 0xa5, sizeof (*p)); #endif /* Let valgrind know the object is free. */ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, sizeof (*p))); /* Let valgrind know the next portion of the object is available, but uninitialized. 
*/ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (q, sizeof (*q))); } /* Bring an object that underwent at least one lifecycle after anew and before the most recent free and poison, back to a usable state, reinitializing whatever is needed for it to be functionally equivalent to an object just allocated and returned by anew. This may poison or clear the next field, used by freelist housekeeping after poison was called. */ static void reinit (T *obj) { T **q ATTRIBUTE_UNUSED = &next (obj); #ifdef ENABLE_GC_CHECKING memset (q, 0xa5, sizeof (*q)); #endif /* Let valgrind know the entire object is available, but uninitialized. */ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (obj, sizeof (*obj))); } /* Reference a GTY-deletable pointer that points to the first object in the free list proper. */ T *&head; public: /* Construct a freelist object chaining objects off of HEAD. */ freelist (T *&head) : head(head) {} /* Add OBJ to the free object list. The former head becomes OBJ's successor. */ void free (T *obj) { poison (obj); next (obj) = head; head = obj; } /* Take an object from the free list, if one is available, or allocate a new one. Objects taken from the free list should be regarded as filled with garbage, except for bits that are configured to be preserved across free and alloc. */ T *alloc () { if (head) { T *obj = head; head = next (head); reinit (obj); return obj; } else return anew (); } }; /* Explicitly specialize the interfaces for freelist<tree_node>: we want to allocate a TREE_LIST using the usual interface, and ensure TREE_CHAIN remains functional. Alas, we have to duplicate a bit of build_tree_list logic in reinit, so this could go out of sync. */ template <> inline tree & freelist<tree_node>::next (tree obj) { return TREE_CHAIN (obj); } template <> inline tree freelist<tree_node>::anew () { return build_tree_list (NULL, NULL); } template <> inline void freelist<tree_node>::poison (tree obj ATTRIBUTE_UNUSED) { int size ATTRIBUTE_UNUSED = sizeof (tree_list); tree p ATTRIBUTE_UNUSED = obj; tree_base *b ATTRIBUTE_UNUSED = &obj->base; tree *q ATTRIBUTE_UNUSED = &next (obj); #ifdef ENABLE_GC_CHECKING gcc_checking_assert (TREE_CODE (obj) == TREE_LIST); /* Poison the data, to indicate the data is garbage. */ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size)); memset (p, 0xa5, size); #endif /* Let valgrind know the object is free. */ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size)); /* But we still want to use the TREE_CODE and TREE_CHAIN parts. */ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (b, sizeof (*b))); VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (q, sizeof (*q))); #ifdef ENABLE_GC_CHECKING VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (b, sizeof (*b))); /* Keep TREE_CHAIN functional. */ TREE_SET_CODE (obj, TREE_LIST); #else VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (b, sizeof (*b))); #endif } template <> inline void freelist<tree_node>::reinit (tree obj ATTRIBUTE_UNUSED) { tree_base *b ATTRIBUTE_UNUSED = &obj->base; #ifdef ENABLE_GC_CHECKING gcc_checking_assert (TREE_CODE (obj) == TREE_LIST); VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (obj, sizeof (tree_list))); memset (obj, 0, sizeof (tree_list)); #endif /* Let valgrind know the entire object is available, but uninitialized. */ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (obj, sizeof (tree_list))); #ifdef ENABLE_GC_CHECKING TREE_SET_CODE (obj, TREE_LIST); #else VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (b, sizeof (*b))); #endif } /* Point to the first object in the TREE_LIST freelist. 
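As a usage sketch of the interface above: tree_list_freelist ().alloc () hands back either a recycled TREE_LIST popped off this chain (after reinit) or a freshly built one from anew, and tree_list_freelist ().free (node) poisons NODE and pushes it onto the chain headed here.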
*/ static GTY((deletable)) tree tree_list_freelist_head; /* Return the/an actual TREE_LIST freelist. */ static inline freelist<tree_node> tree_list_freelist () { return tree_list_freelist_head; } /* Point to the first object in the tinst_level freelist. */ static GTY((deletable)) tinst_level *tinst_level_freelist_head; /* Return the/an actual tinst_level freelist. */ static inline freelist<tinst_level> tinst_level_freelist () { return tinst_level_freelist_head; } /* Point to the first object in the pending_template freelist. */ static GTY((deletable)) pending_template *pending_template_freelist_head; /* Return the/an actual pending_template freelist. */ static inline freelist<pending_template> pending_template_freelist () { return pending_template_freelist_head; } /* Build the TREE_LIST object out of a split list, store it permanently, and return it. */ tree tinst_level::to_list () { gcc_assert (split_list_p ()); tree ret = tree_list_freelist ().alloc (); TREE_PURPOSE (ret) = tldcl; TREE_VALUE (ret) = targs; tldcl = ret; targs = NULL; gcc_assert (tree_list_p ()); return ret; } const unsigned short tinst_level::refcount_infinity; /* Increment OBJ's refcount unless it is already infinite. */ static tinst_level * inc_refcount_use (tinst_level *obj) { if (obj && obj->refcount != tinst_level::refcount_infinity) ++obj->refcount; return obj; } /* Release storage for OBJ and node, if it's a TREE_LIST. */ void tinst_level::free (tinst_level *obj) { if (obj->tree_list_p ()) tree_list_freelist ().free (obj->get_node ()); tinst_level_freelist ().free (obj); } /* Decrement OBJ's refcount if not infinite. If it reaches zero, release OBJ's DECL and OBJ, and start over with the tinst_level object that used to be referenced by OBJ's NEXT. */ static void dec_refcount_use (tinst_level *obj) { while (obj && obj->refcount != tinst_level::refcount_infinity && !--obj->refcount) { tinst_level *next = obj->next; tinst_level::free (obj); obj = next; } } /* Modify PTR so that it points to OBJ, adjusting the refcounts of OBJ and of the former PTR. Omitting the second argument is equivalent to passing (T*)NULL; this is allowed because passing the zero-valued integral constant NULL confuses type deduction and/or overload resolution. */ template <typename T> static void set_refcount_ptr (T *& ptr, T *obj = NULL) { T *save = ptr; ptr = inc_refcount_use (obj); dec_refcount_use (save); } static void add_pending_template (tree d) { tree ti = (TYPE_P (d) ? CLASSTYPE_TEMPLATE_INFO (d) : DECL_TEMPLATE_INFO (d)); struct pending_template *pt; int level; if (TI_PENDING_TEMPLATE_FLAG (ti)) return; /* We are called both from instantiate_decl, where we've already had a tinst_level pushed, and instantiate_template, where we haven't. Compensate. */ gcc_assert (TREE_CODE (d) != TREE_LIST); level = !current_tinst_level || current_tinst_level->maybe_get_node () != d; if (level) push_tinst_level (d); pt = pending_template_freelist ().alloc (); pt->next = NULL; pt->tinst = NULL; set_refcount_ptr (pt->tinst, current_tinst_level); if (last_pending_template) last_pending_template->next = pt; else pending_templates = pt; last_pending_template = pt; TI_PENDING_TEMPLATE_FLAG (ti) = 1; if (level) pop_tinst_level (); } /* Return a TEMPLATE_ID_EXPR corresponding to the indicated FNS and ARGLIST. Valid choices for FNS are given in the cp-tree.def documentation for TEMPLATE_ID_EXPR. 
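For instance (illustrative), for a use such as f<int> (x), FNS is the overload set (or BASELINK, for a member function) found for f and ARGLIST is the TREE_VEC holding int; the function below simply wraps the two in a TEMPLATE_ID_EXPR, rewriting BASELINK_FUNCTIONS in place in the BASELINK case.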
*/ tree lookup_template_function (tree fns, tree arglist) { if (fns == error_mark_node || arglist == error_mark_node) return error_mark_node; gcc_assert (!arglist || TREE_CODE (arglist) == TREE_VEC); if (!is_overloaded_fn (fns) && !identifier_p (fns)) { error ("%q#D is not a function template", fns); return error_mark_node; } if (BASELINK_P (fns)) { BASELINK_FUNCTIONS (fns) = build2 (TEMPLATE_ID_EXPR, unknown_type_node, BASELINK_FUNCTIONS (fns), arglist); return fns; } return build2 (TEMPLATE_ID_EXPR, unknown_type_node, fns, arglist); } /* Within the scope of a template class S<T>, the name S gets bound (in build_self_reference) to a TYPE_DECL for the class, not a TEMPLATE_DECL. If DECL is a TYPE_DECL for current_class_type, or one of its enclosing classes, and that type is a template, return the associated TEMPLATE_DECL. Otherwise, the original DECL is returned. Also handle the case when DECL is a TREE_LIST of ambiguous injected-class-names from different bases. */ tree maybe_get_template_decl_from_type_decl (tree decl) { if (decl == NULL_TREE) return decl; /* DR 176: A lookup that finds an injected-class-name (10.2 [class.member.lookup]) can result in an ambiguity in certain cases (for example, if it is found in more than one base class). If all of the injected-class-names that are found refer to specializations of the same class template, and if the name is followed by a template-argument-list, the reference refers to the class template itself and not a specialization thereof, and is not ambiguous. */ if (TREE_CODE (decl) == TREE_LIST) { tree t, tmpl = NULL_TREE; for (t = decl; t; t = TREE_CHAIN (t)) { tree elt = maybe_get_template_decl_from_type_decl (TREE_VALUE (t)); if (!tmpl) tmpl = elt; else if (tmpl != elt) break; } if (tmpl && t == NULL_TREE) return tmpl; else return decl; } return (decl != NULL_TREE && DECL_SELF_REFERENCE_P (decl) && CLASSTYPE_TEMPLATE_INFO (TREE_TYPE (decl))) ? CLASSTYPE_TI_TEMPLATE (TREE_TYPE (decl)) : decl; } /* Given an IDENTIFIER_NODE (or type TEMPLATE_DECL) and a chain of parameters, find the desired type. D1 is the PTYPENAME terminal, and ARGLIST is the list of arguments. IN_DECL, if non-NULL, is the template declaration we are trying to instantiate. If ENTERING_SCOPE is nonzero, we are about to enter the scope of the class we are looking up. Issue error and warning messages under control of COMPLAIN. If the template class is really a local class in a template function, then the FUNCTION_CONTEXT is the function in which it is being instantiated. ??? Note that this function is currently called *twice* for each template-id: the first time from the parser, while creating the incomplete type (finish_template_type), and the second time during the real instantiation (instantiate_class_template). This is surely something that we want to avoid. It also causes some problems with argument coercion (see convert_nontype_argument for more information on this).
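As a rough sketch of the overall job (illustrative): for D1 naming a class template S and ARGLIST holding int, the function below returns the type node for S<int>, coercing the arguments against S's parameter list and creating and registering the specialization in type_specializations if it has not been seen before.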
*/ static tree lookup_template_class_1 (tree d1, tree arglist, tree in_decl, tree context, int entering_scope, tsubst_flags_t complain) { tree templ = NULL_TREE, parmlist; tree t; spec_entry **slot; spec_entry *entry; spec_entry elt; hashval_t hash; if (identifier_p (d1)) { tree value = innermost_non_namespace_value (d1); if (value && DECL_TEMPLATE_TEMPLATE_PARM_P (value)) templ = value; else { if (context) push_decl_namespace (context); templ = lookup_name (d1); templ = maybe_get_template_decl_from_type_decl (templ); if (context) pop_decl_namespace (); } if (templ) context = DECL_CONTEXT (templ); } else if (TREE_CODE (d1) == TYPE_DECL && MAYBE_CLASS_TYPE_P (TREE_TYPE (d1))) { tree type = TREE_TYPE (d1); /* If we are declaring a constructor, say A<T>::A<T>, we will get an implicit typename for the second A. Deal with it. */ if (TREE_CODE (type) == TYPENAME_TYPE && TREE_TYPE (type)) type = TREE_TYPE (type); if (CLASSTYPE_TEMPLATE_INFO (type)) { templ = CLASSTYPE_TI_TEMPLATE (type); d1 = DECL_NAME (templ); } } else if (TREE_CODE (d1) == ENUMERAL_TYPE || (TYPE_P (d1) && MAYBE_CLASS_TYPE_P (d1))) { templ = TYPE_TI_TEMPLATE (d1); d1 = DECL_NAME (templ); } else if (DECL_TYPE_TEMPLATE_P (d1)) { templ = d1; d1 = DECL_NAME (templ); context = DECL_CONTEXT (templ); } else if (DECL_TEMPLATE_TEMPLATE_PARM_P (d1)) { templ = d1; d1 = DECL_NAME (templ); } /* Issue an error message if we didn't find a template. */ if (! templ) { if (complain & tf_error) error ("%qT is not a template", d1); return error_mark_node; } if (TREE_CODE (templ) != TEMPLATE_DECL /* Make sure it's a user visible template, if it was named by the user. */ || ((complain & tf_user) && !DECL_TEMPLATE_PARM_P (templ) && !PRIMARY_TEMPLATE_P (templ))) { if (complain & tf_error) { error ("non-template type %qT used as a template", d1); if (in_decl) error ("for template declaration %q+D", in_decl); } return error_mark_node; } complain &= ~tf_user; /* An alias that just changes the name of a template is equivalent to the other template, so if any of the arguments are pack expansions, strip the alias to avoid problems with a pack expansion passed to a non-pack alias template parameter (DR 1430). */ if (pack_expansion_args_count (INNERMOST_TEMPLATE_ARGS (arglist))) templ = get_underlying_template (templ); if (DECL_TEMPLATE_TEMPLATE_PARM_P (templ)) { tree parm; tree arglist2 = coerce_template_args_for_ttp (templ, arglist, complain); if (arglist2 == error_mark_node || (!uses_template_parms (arglist2) && check_instantiated_args (templ, arglist2, complain))) return error_mark_node; parm = bind_template_template_parm (TREE_TYPE (templ), arglist2); return parm; } else { tree template_type = TREE_TYPE (templ); tree gen_tmpl; tree type_decl; tree found = NULL_TREE; int arg_depth; int parm_depth; int is_dependent_type; int use_partial_inst_tmpl = false; if (template_type == error_mark_node) /* An error occurred while building the template TEMPL, and a diagnostic has most certainly been emitted for that already. Let's propagate that error. */ return error_mark_node; gen_tmpl = most_general_template (templ); parmlist = DECL_TEMPLATE_PARMS (gen_tmpl); parm_depth = TMPL_PARMS_DEPTH (parmlist); arg_depth = TMPL_ARGS_DEPTH (arglist); if (arg_depth == 1 && parm_depth > 1) { /* We've been given an incomplete set of template arguments. 
For example, given: template <class T> struct S1 { template <class U> struct S2 {}; template <class U> struct S2<U*> {}; }; we will be called with an ARGLIST of `U*', but the TEMPLATE will be `template <class T> template <class U> struct S1<T>::S2'. We must fill in the missing arguments. */ tree ti = TYPE_TEMPLATE_INFO_MAYBE_ALIAS (TREE_TYPE (templ)); arglist = add_outermost_template_args (TI_ARGS (ti), arglist); arg_depth = TMPL_ARGS_DEPTH (arglist); } /* Now we should have enough arguments. */ gcc_assert (parm_depth == arg_depth); /* From here on, we're only interested in the most general template. */ /* Calculate the BOUND_ARGS. These will be the args that are actually tsubst'd into the definition to create the instantiation. */ arglist = coerce_innermost_template_parms (parmlist, arglist, gen_tmpl, complain, /*require_all_args=*/true, /*use_default_args=*/true); if (arglist == error_mark_node) /* We were unable to bind the arguments. */ return error_mark_node; /* In the scope of a template class, explicit references to the template class refer to the type of the template, not any instantiation of it. For example, in: template <class T> class C { void f(C<T>); } the `C<T>' is just the same as `C'. Outside of the class, however, such a reference is an instantiation. */ if (entering_scope || !PRIMARY_TEMPLATE_P (gen_tmpl) || currently_open_class (template_type)) { tree tinfo = TYPE_TEMPLATE_INFO (template_type); if (tinfo && comp_template_args (TI_ARGS (tinfo), arglist)) return template_type; } /* If we already have this specialization, return it. */ elt.tmpl = gen_tmpl; elt.args = arglist; elt.spec = NULL_TREE; hash = spec_hasher::hash (&elt); entry = type_specializations->find_with_hash (&elt, hash); if (entry) return entry->spec; /* If the template's constraints are not satisfied, then we cannot form a valid type. Note that the check is deferred until after the hash lookup. This prevents redundant checks on previously instantiated specializations. */ if (flag_concepts && !DECL_ALIAS_TEMPLATE_P (gen_tmpl) && !constraints_satisfied_p (gen_tmpl, arglist)) { if (complain & tf_error) { auto_diagnostic_group d; error ("template constraint failure for %qD", gen_tmpl); diagnose_constraints (input_location, gen_tmpl, arglist); } return error_mark_node; } is_dependent_type = uses_template_parms (arglist); /* If the deduced arguments are invalid, then the binding failed. */ if (!is_dependent_type && check_instantiated_args (gen_tmpl, INNERMOST_TEMPLATE_ARGS (arglist), complain)) return error_mark_node; if (!is_dependent_type && !PRIMARY_TEMPLATE_P (gen_tmpl) && !LAMBDA_TYPE_P (TREE_TYPE (gen_tmpl)) && TREE_CODE (CP_DECL_CONTEXT (gen_tmpl)) == NAMESPACE_DECL) { found = xref_tag_from_type (TREE_TYPE (gen_tmpl), DECL_NAME (gen_tmpl), /*tag_scope=*/ts_global); return found; } context = DECL_CONTEXT (gen_tmpl); if (context && TYPE_P (context)) { context = tsubst_aggr_type (context, arglist, complain, in_decl, true); context = complete_type (context); } else context = tsubst (context, arglist, complain, in_decl); if (context == error_mark_node) return error_mark_node; if (!context) context = global_namespace; /* Create the type. */ if (DECL_ALIAS_TEMPLATE_P (gen_tmpl)) { /* The user referred to a specialization of an alias template represented by GEN_TMPL. [temp.alias]/2 says: When a template-id refers to the specialization of an alias template, it is equivalent to the associated type obtained by substitution of its template-arguments for the template-parameters in the type-id of the alias template. 
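For instance (illustrative), with template <class T> using ptr = T*; the specialization ptr<int> is simply the type int*, which is what the tsubst call below yields.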
*/ t = tsubst (TREE_TYPE (gen_tmpl), arglist, complain, in_decl); /* Note that the call above (by indirectly calling register_specialization in tsubst_decl) registers the TYPE_DECL representing the specialization of the alias template. So next time someone substitutes ARGLIST for the template parms into the alias template (GEN_TMPL), she'll get that TYPE_DECL back. */ if (t == error_mark_node) return t; } else if (TREE_CODE (template_type) == ENUMERAL_TYPE) { if (!is_dependent_type) { set_current_access_from_decl (TYPE_NAME (template_type)); t = start_enum (TYPE_IDENTIFIER (template_type), NULL_TREE, tsubst (ENUM_UNDERLYING_TYPE (template_type), arglist, complain, in_decl), tsubst_attributes (TYPE_ATTRIBUTES (template_type), arglist, complain, in_decl), SCOPED_ENUM_P (template_type), NULL); if (t == error_mark_node) return t; } else { /* We don't want to call start_enum for this type, since the values for the enumeration constants may involve template parameters. And, no one should be interested in the enumeration constants for such a type. */ t = cxx_make_type (ENUMERAL_TYPE); SET_SCOPED_ENUM_P (t, SCOPED_ENUM_P (template_type)); } SET_OPAQUE_ENUM_P (t, OPAQUE_ENUM_P (template_type)); ENUM_FIXED_UNDERLYING_TYPE_P (t) = ENUM_FIXED_UNDERLYING_TYPE_P (template_type); } else if (CLASS_TYPE_P (template_type)) { /* Lambda closures are regenerated in tsubst_lambda_expr, not instantiated here. */ gcc_assert (!LAMBDA_TYPE_P (template_type)); t = make_class_type (TREE_CODE (template_type)); CLASSTYPE_DECLARED_CLASS (t) = CLASSTYPE_DECLARED_CLASS (template_type); SET_CLASSTYPE_IMPLICIT_INSTANTIATION (t); /* A local class. Make sure the decl gets registered properly. */ if (context == current_function_decl) if (pushtag (DECL_NAME (gen_tmpl), t, /*tag_scope=*/ts_current) == error_mark_node) return error_mark_node; if (comp_template_args (CLASSTYPE_TI_ARGS (template_type), arglist)) /* This instantiation is another name for the primary template type. Set the TYPE_CANONICAL field appropriately. */ TYPE_CANONICAL (t) = template_type; else if (any_template_arguments_need_structural_equality_p (arglist)) /* Some of the template arguments require structural equality testing, so this template class requires structural equality testing. */ SET_TYPE_STRUCTURAL_EQUALITY (t); } else gcc_unreachable (); /* If we called start_enum or pushtag above, this information will already be set up. 
*/ if (!TYPE_NAME (t)) { TYPE_CONTEXT (t) = FROB_CONTEXT (context); type_decl = create_implicit_typedef (DECL_NAME (gen_tmpl), t); DECL_CONTEXT (type_decl) = TYPE_CONTEXT (t); DECL_SOURCE_LOCATION (type_decl) = DECL_SOURCE_LOCATION (TYPE_STUB_DECL (template_type)); } else type_decl = TYPE_NAME (t); if (CLASS_TYPE_P (template_type)) { TREE_PRIVATE (type_decl) = TREE_PRIVATE (TYPE_MAIN_DECL (template_type)); TREE_PROTECTED (type_decl) = TREE_PROTECTED (TYPE_MAIN_DECL (template_type)); if (CLASSTYPE_VISIBILITY_SPECIFIED (template_type)) { DECL_VISIBILITY_SPECIFIED (type_decl) = 1; DECL_VISIBILITY (type_decl) = CLASSTYPE_VISIBILITY (template_type); } } if (OVERLOAD_TYPE_P (t) && !DECL_ALIAS_TEMPLATE_P (gen_tmpl)) { static const char *tags[] = {"abi_tag", "may_alias"}; for (unsigned ix = 0; ix != 2; ix++) { tree attributes = lookup_attribute (tags[ix], TYPE_ATTRIBUTES (template_type)); if (attributes) TYPE_ATTRIBUTES (t) = tree_cons (TREE_PURPOSE (attributes), TREE_VALUE (attributes), TYPE_ATTRIBUTES (t)); } } /* Let's consider the explicit specialization of a member of a class template specialization that is implicitly instantiated, e.g.: template<class T> struct S { template<class U> struct M {}; //#0 }; template<> template<> struct S<int>::M<char> //#1 { int i; }; [temp.expl.spec]/4 says this is valid. In this case, when we write: S<int>::M<char> m; M is instantiated from the CLASSTYPE_TI_TEMPLATE of #1, not from the one of #0. When we encounter #1, we want to store the partial instantiation of M (template<class T> S<int>::M<T>) in its CLASSTYPE_TI_TEMPLATE. For all cases other than this "explicit specialization of member of a class template", we just want to store the most general template into the CLASSTYPE_TI_TEMPLATE of M. This case of "explicit specialization of member of a class template" only happens when: 1/ the enclosing class is an instantiation of, and therefore not the same as, the context of the most general template, and 2/ we aren't looking at the partial instantiation itself, i.e. the innermost arguments are not the same as the innermost parms of the most general template. So it's only when 1/ and 2/ happens that we want to use the partial instantiation of the member template in lieu of its most general template. */ if (PRIMARY_TEMPLATE_P (gen_tmpl) && TMPL_ARGS_HAVE_MULTIPLE_LEVELS (arglist) /* the enclosing class must be an instantiation... */ && CLASS_TYPE_P (context) && !same_type_p (context, DECL_CONTEXT (gen_tmpl))) { TREE_VEC_LENGTH (arglist)--; ++processing_template_decl; tree tinfo = TYPE_TEMPLATE_INFO_MAYBE_ALIAS (TREE_TYPE (gen_tmpl)); tree partial_inst_args = tsubst (INNERMOST_TEMPLATE_ARGS (TI_ARGS (tinfo)), arglist, complain, NULL_TREE); --processing_template_decl; TREE_VEC_LENGTH (arglist)++; if (partial_inst_args == error_mark_node) return error_mark_node; use_partial_inst_tmpl = /*...and we must not be looking at the partial instantiation itself. */ !comp_template_args (INNERMOST_TEMPLATE_ARGS (arglist), partial_inst_args); } if (!use_partial_inst_tmpl) /* This case is easy; there are no member templates involved. */ found = gen_tmpl; else { /* This is a full instantiation of a member template. Find the partial instantiation of which this is an instance. */ /* Temporarily reduce by one the number of levels in the ARGLIST so as to avoid comparing the last set of arguments. */ TREE_VEC_LENGTH (arglist)--; /* We don't use COMPLAIN in the following call because this isn't the immediate context of deduction. 
For instance, tf_partial could be set here as we might be at the beginning of template argument deduction when any explicitly specified template arguments are substituted into the function type. tf_partial could lead into trouble because we wouldn't find the partial instantiation that might have been created outside tf_partial context, because the levels of template parameters wouldn't match, because in a tf_partial context, tsubst doesn't reduce TEMPLATE_PARM_LEVEL. */ found = tsubst (gen_tmpl, arglist, tf_none, NULL_TREE); TREE_VEC_LENGTH (arglist)++; /* FOUND is either a proper class type, or an alias template specialization. In the later case, it's a TYPE_DECL, resulting from the substituting of arguments for parameters in the TYPE_DECL of the alias template done earlier. So be careful while getting the template of FOUND. */ found = (TREE_CODE (found) == TEMPLATE_DECL ? found : (TREE_CODE (found) == TYPE_DECL ? DECL_TI_TEMPLATE (found) : CLASSTYPE_TI_TEMPLATE (found))); if (DECL_CLASS_TEMPLATE_P (found) && CLASSTYPE_TEMPLATE_SPECIALIZATION (TREE_TYPE (found))) { /* If this partial instantiation is specialized, we want to use it for hash table lookup. */ elt.tmpl = found; elt.args = arglist = INNERMOST_TEMPLATE_ARGS (arglist); hash = spec_hasher::hash (&elt); } } // Build template info for the new specialization. SET_TYPE_TEMPLATE_INFO (t, build_template_info (found, arglist)); elt.spec = t; slot = type_specializations->find_slot_with_hash (&elt, hash, INSERT); gcc_checking_assert (*slot == NULL); entry = ggc_alloc<spec_entry> (); *entry = elt; *slot = entry; /* Note this use of the partial instantiation so we can check it later in maybe_process_partial_specialization. */ DECL_TEMPLATE_INSTANTIATIONS (found) = tree_cons (arglist, t, DECL_TEMPLATE_INSTANTIATIONS (found)); if (TREE_CODE (template_type) == ENUMERAL_TYPE && !is_dependent_type && !DECL_ALIAS_TEMPLATE_P (gen_tmpl)) /* Now that the type has been registered on the instantiations list, we set up the enumerators. Because the enumeration constants may involve the enumeration type itself, we make sure to register the type first, and then create the constants. That way, doing tsubst_expr for the enumeration constants won't result in recursive calls here; we'll find the instantiation and exit above. */ tsubst_enum (template_type, t, arglist); if (CLASS_TYPE_P (template_type) && is_dependent_type) /* If the type makes use of template parameters, the code that generates debugging information will crash. */ DECL_IGNORED_P (TYPE_MAIN_DECL (t)) = 1; /* Possibly limit visibility based on template args. */ TREE_PUBLIC (type_decl) = 1; determine_visibility (type_decl); inherit_targ_abi_tags (t); return t; } } /* Wrapper for lookup_template_class_1. */ tree lookup_template_class (tree d1, tree arglist, tree in_decl, tree context, int entering_scope, tsubst_flags_t complain) { tree ret; timevar_push (TV_TEMPLATE_INST); ret = lookup_template_class_1 (d1, arglist, in_decl, context, entering_scope, complain); timevar_pop (TV_TEMPLATE_INST); return ret; } /* Return a TEMPLATE_ID_EXPR for the given variable template and ARGLIST. */ tree lookup_template_variable (tree templ, tree arglist) { if (flag_concepts && variable_concept_p (templ)) return build_concept_check (templ, arglist, tf_none); /* The type of the expression is NULL_TREE since the template-id could refer to an explicit or partial specialization. */ return build2 (TEMPLATE_ID_EXPR, NULL_TREE, templ, arglist); } /* Instantiate a variable declaration from a TEMPLATE_ID_EXPR for use. 
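For example (an illustrative variable template), given template <class T> constexpr T pi = T (3.1415926L); a use of pi<double> reaches finish_template_variable below, which coerces the argument list, checks the template's constraints when concepts are enabled, and hands the result to instantiate_template to produce the VAR_DECL for pi<double>.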
*/ tree finish_template_variable (tree var, tsubst_flags_t complain) { tree templ = TREE_OPERAND (var, 0); tree arglist = TREE_OPERAND (var, 1); tree tmpl_args = DECL_TI_ARGS (DECL_TEMPLATE_RESULT (templ)); arglist = add_outermost_template_args (tmpl_args, arglist); templ = most_general_template (templ); tree parms = DECL_TEMPLATE_PARMS (templ); arglist = coerce_innermost_template_parms (parms, arglist, templ, complain, /*req_all*/true, /*use_default*/true); if (flag_concepts && !constraints_satisfied_p (templ, arglist)) { if (complain & tf_error) { auto_diagnostic_group d; error ("use of invalid variable template %qE", var); diagnose_constraints (location_of (var), templ, arglist); } return error_mark_node; } return instantiate_template (templ, arglist, complain); } /* Construct a TEMPLATE_ID_EXPR for the given variable template TEMPL having TARGS template args, and instantiate it if it's not dependent. */ tree lookup_and_finish_template_variable (tree templ, tree targs, tsubst_flags_t complain) { templ = lookup_template_variable (templ, targs); if (!any_dependent_template_arguments_p (targs)) { templ = finish_template_variable (templ, complain); mark_used (templ); } return convert_from_reference (templ); } struct pair_fn_data { tree_fn_t fn; tree_fn_t any_fn; void *data; /* True when we should also visit template parameters that occur in non-deduced contexts. */ bool include_nondeduced_p; hash_set<tree> *visited; }; /* Called from for_each_template_parm via walk_tree. */ static tree for_each_template_parm_r (tree *tp, int *walk_subtrees, void *d) { tree t = *tp; struct pair_fn_data *pfd = (struct pair_fn_data *) d; tree_fn_t fn = pfd->fn; void *data = pfd->data; tree result = NULL_TREE; #define WALK_SUBTREE(NODE) \ do \ { \ result = for_each_template_parm (NODE, fn, data, pfd->visited, \ pfd->include_nondeduced_p, \ pfd->any_fn); \ if (result) goto out; \ } \ while (0) if (pfd->any_fn && (*pfd->any_fn)(t, data)) return t; if (TYPE_P (t) && (pfd->include_nondeduced_p || TREE_CODE (t) != TYPENAME_TYPE)) WALK_SUBTREE (TYPE_CONTEXT (t)); switch (TREE_CODE (t)) { case RECORD_TYPE: if (TYPE_PTRMEMFUNC_P (t)) break; /* Fall through. */ case UNION_TYPE: case ENUMERAL_TYPE: if (!TYPE_TEMPLATE_INFO (t)) *walk_subtrees = 0; else WALK_SUBTREE (TYPE_TI_ARGS (t)); break; case INTEGER_TYPE: WALK_SUBTREE (TYPE_MIN_VALUE (t)); WALK_SUBTREE (TYPE_MAX_VALUE (t)); break; case METHOD_TYPE: /* Since we're not going to walk subtrees, we have to do this explicitly here. */ WALK_SUBTREE (TYPE_METHOD_BASETYPE (t)); /* Fall through. */ case FUNCTION_TYPE: /* Check the return type. */ WALK_SUBTREE (TREE_TYPE (t)); /* Check the parameter types. Since default arguments are not instantiated until they are needed, the TYPE_ARG_TYPES may contain expressions that involve template parameters. But, no-one should be looking at them yet. And, once they're instantiated, they don't contain template parameters, so there's no point in looking at them then, either. */ { tree parm; for (parm = TYPE_ARG_TYPES (t); parm; parm = TREE_CHAIN (parm)) WALK_SUBTREE (TREE_VALUE (parm)); /* Since we've already handled the TYPE_ARG_TYPES, we don't want walk_tree walking into them itself. 
*/ *walk_subtrees = 0; } if (flag_noexcept_type) { tree spec = TYPE_RAISES_EXCEPTIONS (t); if (spec) WALK_SUBTREE (TREE_PURPOSE (spec)); } break; case TYPEOF_TYPE: case DECLTYPE_TYPE: case UNDERLYING_TYPE: if (pfd->include_nondeduced_p && for_each_template_parm (TYPE_VALUES_RAW (t), fn, data, pfd->visited, pfd->include_nondeduced_p, pfd->any_fn)) return error_mark_node; *walk_subtrees = false; break; case FUNCTION_DECL: case VAR_DECL: if (DECL_LANG_SPECIFIC (t) && DECL_TEMPLATE_INFO (t)) WALK_SUBTREE (DECL_TI_ARGS (t)); /* Fall through. */ case PARM_DECL: case CONST_DECL: if (TREE_CODE (t) == CONST_DECL && DECL_TEMPLATE_PARM_P (t)) WALK_SUBTREE (DECL_INITIAL (t)); if (DECL_CONTEXT (t) && pfd->include_nondeduced_p) WALK_SUBTREE (DECL_CONTEXT (t)); break; case BOUND_TEMPLATE_TEMPLATE_PARM: /* Record template parameters such as `T' inside `TT<T>'. */ WALK_SUBTREE (TYPE_TI_ARGS (t)); /* Fall through. */ case TEMPLATE_TEMPLATE_PARM: case TEMPLATE_TYPE_PARM: case TEMPLATE_PARM_INDEX: if (fn && (*fn)(t, data)) return t; else if (!fn) return t; break; case TEMPLATE_DECL: /* A template template parameter is encountered. */ if (DECL_TEMPLATE_TEMPLATE_PARM_P (t)) WALK_SUBTREE (TREE_TYPE (t)); /* Already substituted template template parameter */ *walk_subtrees = 0; break; case TYPENAME_TYPE: /* A template-id in a TYPENAME_TYPE might be a deduced context after partial instantiation. */ WALK_SUBTREE (TYPENAME_TYPE_FULLNAME (t)); break; case CONSTRUCTOR: if (TREE_TYPE (t) && TYPE_PTRMEMFUNC_P (TREE_TYPE (t)) && pfd->include_nondeduced_p) WALK_SUBTREE (TYPE_PTRMEMFUNC_FN_TYPE (TREE_TYPE (t))); break; case INDIRECT_REF: case COMPONENT_REF: /* If there's no type, then this thing must be some expression involving template parameters. */ if (!fn && !TREE_TYPE (t)) return error_mark_node; break; case MODOP_EXPR: case CAST_EXPR: case IMPLICIT_CONV_EXPR: case REINTERPRET_CAST_EXPR: case CONST_CAST_EXPR: case STATIC_CAST_EXPR: case DYNAMIC_CAST_EXPR: case ARROW_EXPR: case DOTSTAR_EXPR: case TYPEID_EXPR: case PSEUDO_DTOR_EXPR: if (!fn) return error_mark_node; break; case SCOPE_REF: if (pfd->include_nondeduced_p) WALK_SUBTREE (TREE_OPERAND (t, 0)); break; case REQUIRES_EXPR: { if (!fn) return error_mark_node; /* Recursively walk the type of each constraint variable. */ tree p = TREE_OPERAND (t, 0); while (p) { WALK_SUBTREE (TREE_TYPE (p)); p = TREE_CHAIN (p); } } break; default: break; } #undef WALK_SUBTREE /* We didn't find any template parameters we liked. */ out: return result; } /* For each TEMPLATE_TYPE_PARM, TEMPLATE_TEMPLATE_PARM, BOUND_TEMPLATE_TEMPLATE_PARM or TEMPLATE_PARM_INDEX in T, call FN with the parameter and the DATA. If FN returns nonzero, the iteration is terminated, and for_each_template_parm returns 1. Otherwise, the iteration continues. If FN never returns a nonzero value, the value returned by for_each_template_parm is 0. If FN is NULL, it is considered to be the function which always returns 1. If INCLUDE_NONDEDUCED_P, then this routine will also visit template parameters that occur in non-deduced contexts. When false, only visits those template parameters that can be deduced. */ static tree for_each_template_parm (tree t, tree_fn_t fn, void* data, hash_set<tree> *visited, bool include_nondeduced_p, tree_fn_t any_fn) { struct pair_fn_data pfd; tree result; /* Set up. */ pfd.fn = fn; pfd.any_fn = any_fn; pfd.data = data; pfd.include_nondeduced_p = include_nondeduced_p; /* Walk the tree. 
(Conceptually, we would like to walk without duplicates, but for_each_template_parm_r recursively calls for_each_template_parm, so we would need to reorganize a fair bit to use walk_tree_without_duplicates, so we keep our own visited list.) */ if (visited) pfd.visited = visited; else pfd.visited = new hash_set<tree>; result = cp_walk_tree (&t, for_each_template_parm_r, &pfd, pfd.visited); /* Clean up. */ if (!visited) { delete pfd.visited; pfd.visited = 0; } return result; } struct find_template_parameter_info { explicit find_template_parameter_info (tree ctx_parms) : parm_list (NULL_TREE), ctx_parms (ctx_parms), max_depth (TMPL_PARMS_DEPTH (ctx_parms)) {} hash_set<tree> visited; hash_set<tree> parms; tree parm_list; tree ctx_parms; int max_depth; }; /* Appends the declaration of T to the list in DATA. */ static int keep_template_parm (tree t, void* data) { find_template_parameter_info *ftpi = (find_template_parameter_info*)data; /* Template parameters declared within the expression are not part of the parameter mapping. For example, in this concept: template<typename T> concept C = requires { <expr> } -> same_as<int>; the return specifier same_as<int> declares a new decltype parameter that must not be part of the parameter mapping. The same is true for generic lambda parameters, lambda template parameters, etc. */ int level; int index; template_parm_level_and_index (t, &level, &index); if (level > ftpi->max_depth) return 0; if (TREE_CODE (t) == BOUND_TEMPLATE_TEMPLATE_PARM) /* We want the underlying TEMPLATE_TEMPLATE_PARM, not the BOUND_TEMPLATE_TEMPLATE_PARM itself. */ t = TREE_TYPE (TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL (t)); /* Arguments like const T yield parameters like const T. This means that a template-id like X<T, const T> would yield two distinct parameters: T and const T. Adjust types to their unqualified versions. */ if (TYPE_P (t)) t = TYPE_MAIN_VARIANT (t); if (!ftpi->parms.add (t)) ftpi->parm_list = tree_cons (NULL_TREE, t, ftpi->parm_list); return 0; } /* Ensure that we recursively examine certain terms that are not normally visited in for_each_template_parm_r. */ static int any_template_parm_r (tree t, void *data) { find_template_parameter_info *ftpi = (find_template_parameter_info*)data; #define WALK_SUBTREE(NODE) \ do \ { \ for_each_template_parm (NODE, keep_template_parm, data, \ &ftpi->visited, true, \ any_template_parm_r); \ } \ while (0) /* A mention of a member alias/typedef is a use of all of its template arguments, including those from the enclosing class, so we don't use alias_template_specialization_p here. */ if (TYPE_P (t) && typedef_variant_p (t)) if (tree tinfo = TYPE_ALIAS_TEMPLATE_INFO (t)) WALK_SUBTREE (TI_ARGS (tinfo)); switch (TREE_CODE (t)) { case TEMPLATE_TYPE_PARM: /* Type constraints of a placeholder type may contain parameters. */ if (is_auto (t)) if (tree constr = PLACEHOLDER_TYPE_CONSTRAINTS (t)) WALK_SUBTREE (constr); break; case TEMPLATE_ID_EXPR: /* Search through references to variable templates. */ WALK_SUBTREE (TREE_OPERAND (t, 0)); WALK_SUBTREE (TREE_OPERAND (t, 1)); break; case TEMPLATE_PARM_INDEX: case PARM_DECL: /* A parameter or constraint variable may also depend on a template parameter without explicitly naming it. */ WALK_SUBTREE (TREE_TYPE (t)); break; case TEMPLATE_DECL: { /* If T is a member template that shares template parameters with ctx_parms, we need to mark all those parameters for mapping. 
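For example, given

  template<typename T> struct A
  {
    template<typename U> static void f (U);
  };

a constraint that names the member template A<T>::f also depends on the enclosing parameter T, so T has to end up in the mapping as well.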
*/ tree dparms = DECL_TEMPLATE_PARMS (t); tree cparms = ftpi->ctx_parms; while (TMPL_PARMS_DEPTH (dparms) > ftpi->max_depth) dparms = TREE_CHAIN (dparms); while (TMPL_PARMS_DEPTH (cparms) > TMPL_PARMS_DEPTH (dparms)) cparms = TREE_CHAIN (cparms); while (dparms && (TREE_TYPE (TREE_VALUE (dparms)) != TREE_TYPE (TREE_VALUE (cparms)))) dparms = TREE_CHAIN (dparms), cparms = TREE_CHAIN (cparms); if (dparms) { int ddepth = TMPL_PARMS_DEPTH (dparms); tree dargs = TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (t))); for (int i = 0; i < ddepth; ++i) WALK_SUBTREE (TMPL_ARGS_LEVEL (dargs, i+1)); } } break; case LAMBDA_EXPR: { /* Look in the parms and body. */ tree fn = lambda_function (t); WALK_SUBTREE (TREE_TYPE (fn)); WALK_SUBTREE (DECL_SAVED_TREE (fn)); } break; case IDENTIFIER_NODE: if (IDENTIFIER_CONV_OP_P (t)) /* The conversion-type-id of a conversion operator may be dependent. */ WALK_SUBTREE (TREE_TYPE (t)); break; default: break; } /* Keep walking. */ return 0; } /* Returns a list of unique template parameters found within T, where CTX_PARMS are the template parameters in scope. */ tree find_template_parameters (tree t, tree ctx_parms) { if (!ctx_parms) return NULL_TREE; find_template_parameter_info ftpi (ctx_parms); for_each_template_parm (t, keep_template_parm, &ftpi, &ftpi.visited, /*include_nondeduced*/true, any_template_parm_r); return ftpi.parm_list; } /* Returns true if T depends on any template parameter. */ int uses_template_parms (tree t) { if (t == NULL_TREE) return false; bool dependent_p; int saved_processing_template_decl; saved_processing_template_decl = processing_template_decl; if (!saved_processing_template_decl) processing_template_decl = 1; if (TYPE_P (t)) dependent_p = dependent_type_p (t); else if (TREE_CODE (t) == TREE_VEC) dependent_p = any_dependent_template_arguments_p (t); else if (TREE_CODE (t) == TREE_LIST) dependent_p = (uses_template_parms (TREE_VALUE (t)) || uses_template_parms (TREE_CHAIN (t))); else if (TREE_CODE (t) == TYPE_DECL) dependent_p = dependent_type_p (TREE_TYPE (t)); else if (t == error_mark_node) dependent_p = false; else dependent_p = value_dependent_expression_p (t); processing_template_decl = saved_processing_template_decl; return dependent_p; } /* Returns true iff current_function_decl is an incompletely instantiated template. Useful instead of processing_template_decl because the latter is set to 0 during instantiate_non_dependent_expr. */ bool in_template_function (void) { tree fn = current_function_decl; bool ret; ++processing_template_decl; ret = (fn && DECL_LANG_SPECIFIC (fn) && DECL_TEMPLATE_INFO (fn) && any_dependent_template_arguments_p (DECL_TI_ARGS (fn))); --processing_template_decl; return ret; } /* Returns true if T depends on any template parameter with level LEVEL. */ bool uses_template_parms_level (tree t, int level) { return for_each_template_parm (t, template_parm_this_level_p, &level, NULL, /*include_nondeduced_p=*/true); } /* Returns true if the signature of DECL depends on any template parameter from its enclosing class. 
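For example, in

  template<typename T> struct S
  {
    void f (T);    // signature depends on the enclosing parameter T
    void g (int);  // signature does not
  };

only the declaration of f depends on a parameter of the enclosing class template.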
*/ bool uses_outer_template_parms (tree decl) { int depth = template_class_depth (CP_DECL_CONTEXT (decl)); if (depth == 0) return false; if (for_each_template_parm (TREE_TYPE (decl), template_parm_outer_level, &depth, NULL, /*include_nondeduced_p=*/true)) return true; if (PRIMARY_TEMPLATE_P (decl) && for_each_template_parm (INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (decl)), template_parm_outer_level, &depth, NULL, /*include_nondeduced_p=*/true)) return true; tree ci = get_constraints (decl); if (ci) ci = CI_ASSOCIATED_CONSTRAINTS (ci); if (ci && for_each_template_parm (ci, template_parm_outer_level, &depth, NULL, /*nondeduced*/true)) return true; return false; } /* Returns TRUE iff INST is an instantiation we don't need to do in an ill-formed translation unit, i.e. a variable or function that isn't usable in a constant expression. */ static inline bool neglectable_inst_p (tree d) { return (d && DECL_P (d) && !undeduced_auto_decl (d) && !(TREE_CODE (d) == FUNCTION_DECL ? DECL_DECLARED_CONSTEXPR_P (d) : decl_maybe_constant_var_p (d))); } /* Returns TRUE iff we should refuse to instantiate DECL because it's neglectable and instantiated from within an erroneous instantiation. */ static bool limit_bad_template_recursion (tree decl) { struct tinst_level *lev = current_tinst_level; int errs = errorcount + sorrycount; if (lev == NULL || errs == 0 || !neglectable_inst_p (decl)) return false; for (; lev; lev = lev->next) if (neglectable_inst_p (lev->maybe_get_node ())) break; return (lev && errs > lev->errors); } static int tinst_depth; extern int max_tinst_depth; int depth_reached; static GTY(()) struct tinst_level *last_error_tinst_level; /* We're starting to instantiate D; record the template instantiation context at LOC for diagnostics and to restore it later. */ static bool push_tinst_level_loc (tree tldcl, tree targs, location_t loc) { struct tinst_level *new_level; if (tinst_depth >= max_tinst_depth) { /* Tell error.c not to try to instantiate any templates. */ at_eof = 2; fatal_error (input_location, "template instantiation depth exceeds maximum of %d" " (use %<-ftemplate-depth=%> to increase the maximum)", max_tinst_depth); return false; } /* If the current instantiation caused problems, don't let it instantiate anything else. Do allow deduction substitution and decls usable in constant expressions. */ if (!targs && limit_bad_template_recursion (tldcl)) { /* Avoid no_linkage_errors and unused function warnings for this decl. */ TREE_NO_WARNING (tldcl) = 1; return false; } /* When not -quiet, dump template instantiations other than functions, since announce_function will take care of those. */ if (!quiet_flag && !targs && TREE_CODE (tldcl) != TREE_LIST && TREE_CODE (tldcl) != FUNCTION_DECL) fprintf (stderr, " %s", decl_as_string (tldcl, TFF_DECL_SPECIFIERS)); new_level = tinst_level_freelist ().alloc (); new_level->tldcl = tldcl; new_level->targs = targs; new_level->locus = loc; new_level->errors = errorcount + sorrycount; new_level->next = NULL; new_level->refcount = 0; set_refcount_ptr (new_level->next, current_tinst_level); set_refcount_ptr (current_tinst_level, new_level); ++tinst_depth; if (GATHER_STATISTICS && (tinst_depth > depth_reached)) depth_reached = tinst_depth; return true; } /* We're starting substitution of TMPL<ARGS>; record the template substitution context for diagnostics and to restore it later. 
*/ static bool push_tinst_level (tree tmpl, tree args) { return push_tinst_level_loc (tmpl, args, input_location); } /* We're starting to instantiate D; record INPUT_LOCATION and the template instantiation context for diagnostics and to restore it later. */ bool push_tinst_level (tree d) { return push_tinst_level_loc (d, input_location); } /* Likewise, but record LOC as the program location. */ bool push_tinst_level_loc (tree d, location_t loc) { gcc_assert (TREE_CODE (d) != TREE_LIST); return push_tinst_level_loc (d, NULL, loc); } /* We're done instantiating this template; return to the instantiation context. */ void pop_tinst_level (void) { /* Restore the filename and line number stashed away when we started this instantiation. */ input_location = current_tinst_level->locus; set_refcount_ptr (current_tinst_level, current_tinst_level->next); --tinst_depth; } /* We're instantiating a deferred template; restore the template instantiation context in which the instantiation was requested, which is one step out from LEVEL. Return the corresponding DECL or TYPE. */ static tree reopen_tinst_level (struct tinst_level *level) { struct tinst_level *t; tinst_depth = 0; for (t = level; t; t = t->next) ++tinst_depth; set_refcount_ptr (current_tinst_level, level); pop_tinst_level (); if (current_tinst_level) current_tinst_level->errors = errorcount+sorrycount; return level->maybe_get_node (); } /* Returns the TINST_LEVEL which gives the original instantiation context. */ struct tinst_level * outermost_tinst_level (void) { struct tinst_level *level = current_tinst_level; if (level) while (level->next) level = level->next; return level; } /* DECL is a friend FUNCTION_DECL or TEMPLATE_DECL. ARGS is the vector of template arguments, as for tsubst. Returns an appropriate tsubst'd friend declaration. */ static tree tsubst_friend_function (tree decl, tree args) { tree new_friend; if (TREE_CODE (decl) == FUNCTION_DECL && DECL_TEMPLATE_INSTANTIATION (decl) && TREE_CODE (DECL_TI_TEMPLATE (decl)) != TEMPLATE_DECL) /* This was a friend declared with an explicit template argument list, e.g.: friend void f<>(T); to indicate that f was a template instantiation, not a new function declaration. Now, we have to figure out what instantiation of what template. */ { tree template_id, arglist, fns; tree new_args; tree tmpl; tree ns = decl_namespace_context (TYPE_MAIN_DECL (current_class_type)); /* Friend functions are looked up in the containing namespace scope. We must enter that scope, to avoid finding member functions of the current class with same name. */ push_nested_namespace (ns); fns = tsubst_expr (DECL_TI_TEMPLATE (decl), args, tf_warning_or_error, NULL_TREE, /*integral_constant_expression_p=*/false); pop_nested_namespace (ns); arglist = tsubst (DECL_TI_ARGS (decl), args, tf_warning_or_error, NULL_TREE); template_id = lookup_template_function (fns, arglist); new_friend = tsubst (decl, args, tf_warning_or_error, NULL_TREE); tmpl = determine_specialization (template_id, new_friend, &new_args, /*need_member_template=*/0, TREE_VEC_LENGTH (args), tsk_none); return instantiate_template (tmpl, new_args, tf_error); } new_friend = tsubst (decl, args, tf_warning_or_error, NULL_TREE); /* The NEW_FRIEND will look like an instantiation, to the compiler, but is not an instantiation from the point of view of the language. For example, we might have had: template <class T> struct S { template <class U> friend void f(T, U); }; Then, in S<int>, template <class U> void f(int, U) is not an instantiation of anything. 
*/ if (new_friend == error_mark_node) return error_mark_node; DECL_USE_TEMPLATE (new_friend) = 0; if (TREE_CODE (decl) == TEMPLATE_DECL) { DECL_USE_TEMPLATE (DECL_TEMPLATE_RESULT (new_friend)) = 0; DECL_SAVED_TREE (DECL_TEMPLATE_RESULT (new_friend)) = DECL_SAVED_TREE (DECL_TEMPLATE_RESULT (decl)); /* Substitute TEMPLATE_PARMS_CONSTRAINTS so that parameter levels will match in decls_match. */ tree parms = DECL_TEMPLATE_PARMS (new_friend); tree treqs = TEMPLATE_PARMS_CONSTRAINTS (parms); treqs = maybe_substitute_reqs_for (treqs, new_friend); TEMPLATE_PARMS_CONSTRAINTS (parms) = treqs; } /* The mangled name for the NEW_FRIEND is incorrect. The function is not a template instantiation and should not be mangled like one. Therefore, we forget the mangling here; we'll recompute it later if we need it. */ if (TREE_CODE (new_friend) != TEMPLATE_DECL) { SET_DECL_RTL (new_friend, NULL); SET_DECL_ASSEMBLER_NAME (new_friend, NULL_TREE); } if (DECL_NAMESPACE_SCOPE_P (new_friend)) { tree old_decl; tree new_friend_template_info; tree new_friend_result_template_info; tree ns; int new_friend_is_defn; /* We must save some information from NEW_FRIEND before calling duplicate decls since that function will free NEW_FRIEND if possible. */ new_friend_template_info = DECL_TEMPLATE_INFO (new_friend); new_friend_is_defn = (DECL_INITIAL (DECL_TEMPLATE_RESULT (template_for_substitution (new_friend))) != NULL_TREE); if (TREE_CODE (new_friend) == TEMPLATE_DECL) { /* This declaration is a `primary' template. */ DECL_PRIMARY_TEMPLATE (new_friend) = new_friend; new_friend_result_template_info = DECL_TEMPLATE_INFO (DECL_TEMPLATE_RESULT (new_friend)); } else new_friend_result_template_info = NULL_TREE; /* Inside pushdecl_namespace_level, we will push into the current namespace. However, the friend function should go into the namespace of the template. */ ns = decl_namespace_context (new_friend); push_nested_namespace (ns); old_decl = pushdecl_namespace_level (new_friend, /*is_friend=*/true); pop_nested_namespace (ns); if (old_decl == error_mark_node) return error_mark_node; if (old_decl != new_friend) { /* This new friend declaration matched an existing declaration. For example, given: template <class T> void f(T); template <class U> class C { template <class T> friend void f(T) {} }; the friend declaration actually provides the definition of `f', once C has been instantiated for some type. So, old_decl will be the out-of-class template declaration, while new_friend is the in-class definition. But, if `f' was called before this point, the instantiation of `f' will have DECL_TI_ARGS corresponding to `T' but not to `U', references to which might appear in the definition of `f'. Previously, the most general template for an instantiation of `f' was the out-of-class version; now it is the in-class version. Therefore, we run through all specialization of `f', adding to their DECL_TI_ARGS appropriately. In particular, they need a new set of outer arguments, corresponding to the arguments for this class instantiation. The same situation can arise with something like this: friend void f(int); template <class T> class C { friend void f(T) {} }; when `C<int>' is instantiated. Now, `f(int)' is defined in the class. */ if (!new_friend_is_defn) /* On the other hand, if the in-class declaration does *not* provide a definition, then we don't want to alter existing definitions. We can just leave everything alone. 
*/ ; else { tree new_template = TI_TEMPLATE (new_friend_template_info); tree new_args = TI_ARGS (new_friend_template_info); /* Overwrite whatever template info was there before, if any, with the new template information pertaining to the declaration. */ DECL_TEMPLATE_INFO (old_decl) = new_friend_template_info; if (TREE_CODE (old_decl) != TEMPLATE_DECL) { /* We should have called reregister_specialization in duplicate_decls. */ gcc_assert (retrieve_specialization (new_template, new_args, 0) == old_decl); /* Instantiate it if the global has already been used. */ if (DECL_ODR_USED (old_decl)) instantiate_decl (old_decl, /*defer_ok=*/true, /*expl_inst_class_mem_p=*/false); } else { tree t; /* Indicate that the old function template is a partial instantiation. */ DECL_TEMPLATE_INFO (DECL_TEMPLATE_RESULT (old_decl)) = new_friend_result_template_info; gcc_assert (new_template == most_general_template (new_template)); gcc_assert (new_template != old_decl); /* Reassign any specializations already in the hash table to the new more general template, and add the additional template args. */ for (t = DECL_TEMPLATE_INSTANTIATIONS (old_decl); t != NULL_TREE; t = TREE_CHAIN (t)) { tree spec = TREE_VALUE (t); spec_entry elt; elt.tmpl = old_decl; elt.args = DECL_TI_ARGS (spec); elt.spec = NULL_TREE; decl_specializations->remove_elt (&elt); DECL_TI_ARGS (spec) = add_outermost_template_args (new_args, DECL_TI_ARGS (spec)); register_specialization (spec, new_template, DECL_TI_ARGS (spec), true, 0); } DECL_TEMPLATE_INSTANTIATIONS (old_decl) = NULL_TREE; } } /* The information from NEW_FRIEND has been merged into OLD_DECL by duplicate_decls. */ new_friend = old_decl; } } else { tree context = DECL_CONTEXT (new_friend); bool dependent_p; /* In the code template <class T> class C { template <class U> friend void C1<U>::f (); // case 1 friend void C2<T>::f (); // case 2 }; we only need to make sure CONTEXT is a complete type for case 2. To distinguish between the two cases, we note that CONTEXT of case 1 remains dependent type after tsubst while this isn't true for case 2. */ ++processing_template_decl; dependent_p = dependent_type_p (context); --processing_template_decl; if (!dependent_p && !complete_type_or_else (context, NULL_TREE)) return error_mark_node; if (COMPLETE_TYPE_P (context)) { tree fn = new_friend; /* do_friend adds the TEMPLATE_DECL for any member friend template even if it isn't a member template, i.e. template <class T> friend A<T>::f(); Look through it in that case. */ if (TREE_CODE (fn) == TEMPLATE_DECL && !PRIMARY_TEMPLATE_P (fn)) fn = DECL_TEMPLATE_RESULT (fn); /* Check to see that the declaration is really present, and, possibly obtain an improved declaration. */ fn = check_classfn (context, fn, NULL_TREE); if (fn) new_friend = fn; } } return new_friend; } /* FRIEND_TMPL is a friend TEMPLATE_DECL. ARGS is the vector of template arguments, as for tsubst. Returns an appropriate tsubst'd friend type or error_mark_node on failure. 
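For example, given

  template <class U> struct List;
  template <class T> struct S
  {
    template <class U> friend struct List;
  };

instantiating S<int> must match the friend up with the already declared template List; had List not been declared, the friend template would instead be injected into the enclosing namespace.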
*/ static tree tsubst_friend_class (tree friend_tmpl, tree args) { tree tmpl; if (DECL_TEMPLATE_TEMPLATE_PARM_P (friend_tmpl)) { tmpl = tsubst (TREE_TYPE (friend_tmpl), args, tf_none, NULL_TREE); return TREE_TYPE (tmpl); } tree context = CP_DECL_CONTEXT (friend_tmpl); if (TREE_CODE (context) == NAMESPACE_DECL) push_nested_namespace (context); else { context = tsubst (context, args, tf_error, NULL_TREE); push_nested_class (context); } tmpl = lookup_name_real (DECL_NAME (friend_tmpl), /*prefer_type=*/false, /*non_class=*/false, /*block_p=*/false, /*namespaces_only=*/false, LOOKUP_HIDDEN); if (tmpl && DECL_CLASS_TEMPLATE_P (tmpl)) { /* The friend template has already been declared. Just check to see that the declarations match, and install any new default parameters. We must tsubst the default parameters, of course. We only need the innermost template parameters because that is all that redeclare_class_template will look at. */ if (TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (friend_tmpl)) > TMPL_ARGS_DEPTH (args)) { tree parms = tsubst_template_parms (DECL_TEMPLATE_PARMS (friend_tmpl), args, tf_warning_or_error); location_t saved_input_location = input_location; input_location = DECL_SOURCE_LOCATION (friend_tmpl); tree cons = get_constraints (tmpl); redeclare_class_template (TREE_TYPE (tmpl), parms, cons); input_location = saved_input_location; } } else { /* The friend template has not already been declared. In this case, the instantiation of the template class will cause the injection of this template into the namespace scope. */ tmpl = tsubst (friend_tmpl, args, tf_warning_or_error, NULL_TREE); if (tmpl != error_mark_node) { /* The new TMPL is not an instantiation of anything, so we forget its origins. We don't reset CLASSTYPE_TI_TEMPLATE for the new type because that is supposed to be the corresponding template decl, i.e., TMPL. */ DECL_USE_TEMPLATE (tmpl) = 0; DECL_TEMPLATE_INFO (tmpl) = NULL_TREE; CLASSTYPE_USE_TEMPLATE (TREE_TYPE (tmpl)) = 0; CLASSTYPE_TI_ARGS (TREE_TYPE (tmpl)) = INNERMOST_TEMPLATE_ARGS (CLASSTYPE_TI_ARGS (TREE_TYPE (tmpl))); /* It is hidden. */ retrofit_lang_decl (DECL_TEMPLATE_RESULT (tmpl)); DECL_ANTICIPATED (tmpl) = DECL_ANTICIPATED (DECL_TEMPLATE_RESULT (tmpl)) = true; /* Substitute into and set the constraints on the new declaration. */ if (tree ci = get_constraints (friend_tmpl)) { ++processing_template_decl; ci = tsubst_constraint_info (ci, args, tf_warning_or_error, DECL_FRIEND_CONTEXT (friend_tmpl)); --processing_template_decl; set_constraints (tmpl, ci); } /* Inject this template into the enclosing namspace scope. */ tmpl = pushdecl_namespace_level (tmpl, true); } } if (TREE_CODE (context) == NAMESPACE_DECL) pop_nested_namespace (context); else pop_nested_class (); return TREE_TYPE (tmpl); } /* Returns zero if TYPE cannot be completed later due to circularity. Otherwise returns one. */ static int can_complete_type_without_circularity (tree type) { if (type == NULL_TREE || type == error_mark_node) return 0; else if (COMPLETE_TYPE_P (type)) return 1; else if (TREE_CODE (type) == ARRAY_TYPE) return can_complete_type_without_circularity (TREE_TYPE (type)); else if (CLASS_TYPE_P (type) && TYPE_BEING_DEFINED (TYPE_MAIN_VARIANT (type))) return 0; else return 1; } static tree tsubst_omp_clauses (tree, enum c_omp_region_type, tree, tsubst_flags_t, tree); /* Instantiate a single dependent attribute T (a TREE_LIST), and return either T or a new TREE_LIST, possibly a chain in the case of a pack expansion. 
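For example, in

  template <int N> struct S
  {
    int buf[N] __attribute__ ((aligned (N)));
  };

the attribute argument N is dependent, so the attribute can only be evaluated here, once S has been instantiated with a concrete N.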
*/ static tree tsubst_attribute (tree t, tree *decl_p, tree args, tsubst_flags_t complain, tree in_decl) { gcc_assert (ATTR_IS_DEPENDENT (t)); tree val = TREE_VALUE (t); if (val == NULL_TREE) /* Nothing to do. */; else if ((flag_openmp || flag_openmp_simd) && is_attribute_p ("omp declare simd", get_attribute_name (t))) { tree clauses = TREE_VALUE (val); clauses = tsubst_omp_clauses (clauses, C_ORT_OMP_DECLARE_SIMD, args, complain, in_decl); c_omp_declare_simd_clauses_to_decls (*decl_p, clauses); clauses = finish_omp_clauses (clauses, C_ORT_OMP_DECLARE_SIMD); tree parms = DECL_ARGUMENTS (*decl_p); clauses = c_omp_declare_simd_clauses_to_numbers (parms, clauses); if (clauses) val = build_tree_list (NULL_TREE, clauses); else val = NULL_TREE; } else if (flag_openmp && is_attribute_p ("omp declare variant base", get_attribute_name (t))) { ++cp_unevaluated_operand; tree varid = tsubst_expr (TREE_PURPOSE (val), args, complain, in_decl, /*integral_constant_expression_p=*/false); --cp_unevaluated_operand; tree chain = TREE_CHAIN (val); location_t match_loc = cp_expr_loc_or_input_loc (TREE_PURPOSE (chain)); tree ctx = copy_list (TREE_VALUE (val)); tree simd = get_identifier ("simd"); tree score = get_identifier (" score"); tree condition = get_identifier ("condition"); for (tree t1 = ctx; t1; t1 = TREE_CHAIN (t1)) { const char *set = IDENTIFIER_POINTER (TREE_PURPOSE (t1)); TREE_VALUE (t1) = copy_list (TREE_VALUE (t1)); for (tree t2 = TREE_VALUE (t1); t2; t2 = TREE_CHAIN (t2)) { if (TREE_PURPOSE (t2) == simd && set[0] == 'c') { tree clauses = TREE_VALUE (t2); clauses = tsubst_omp_clauses (clauses, C_ORT_OMP_DECLARE_SIMD, args, complain, in_decl); c_omp_declare_simd_clauses_to_decls (*decl_p, clauses); clauses = finish_omp_clauses (clauses, C_ORT_OMP_DECLARE_SIMD); TREE_VALUE (t2) = clauses; } else { TREE_VALUE (t2) = copy_list (TREE_VALUE (t2)); for (tree t3 = TREE_VALUE (t2); t3; t3 = TREE_CHAIN (t3)) if (TREE_VALUE (t3)) { bool allow_string = ((TREE_PURPOSE (t2) != condition || set[0] != 'u') && TREE_PURPOSE (t3) != score); tree v = TREE_VALUE (t3); if (TREE_CODE (v) == STRING_CST && allow_string) continue; v = tsubst_expr (v, args, complain, in_decl, true); v = fold_non_dependent_expr (v); if (!INTEGRAL_TYPE_P (TREE_TYPE (v)) || (TREE_PURPOSE (t3) == score ? TREE_CODE (v) != INTEGER_CST : !tree_fits_shwi_p (v))) { location_t loc = cp_expr_loc_or_loc (TREE_VALUE (t3), match_loc); if (TREE_PURPOSE (t3) == score) error_at (loc, "score argument must be " "constant integer expression"); else if (allow_string) error_at (loc, "property must be constant " "integer expression or string " "literal"); else error_at (loc, "property must be constant " "integer expression"); return NULL_TREE; } else if (TREE_PURPOSE (t3) == score && tree_int_cst_sgn (v) < 0) { location_t loc = cp_expr_loc_or_loc (TREE_VALUE (t3), match_loc); error_at (loc, "score argument must be " "non-negative"); return NULL_TREE; } TREE_VALUE (t3) = v; } } } } val = tree_cons (varid, ctx, chain); } /* If the first attribute argument is an identifier, don't pass it through tsubst. Attributes like mode, format, cleanup and several target specific attributes expect it unmodified. */ else if (attribute_takes_identifier_p (get_attribute_name (t))) { tree chain = tsubst_expr (TREE_CHAIN (val), args, complain, in_decl, /*integral_constant_expression_p=*/false); if (chain != TREE_CHAIN (val)) val = tree_cons (NULL_TREE, TREE_VALUE (val), chain); } else if (PACK_EXPANSION_P (val)) { /* An attribute pack expansion. 
*/ tree purp = TREE_PURPOSE (t); tree pack = tsubst_pack_expansion (val, args, complain, in_decl); if (pack == error_mark_node) return error_mark_node; int len = TREE_VEC_LENGTH (pack); tree list = NULL_TREE; tree *q = &list; for (int i = 0; i < len; ++i) { tree elt = TREE_VEC_ELT (pack, i); *q = build_tree_list (purp, elt); q = &TREE_CHAIN (*q); } return list; } else val = tsubst_expr (val, args, complain, in_decl, /*integral_constant_expression_p=*/false); if (val != TREE_VALUE (t)) return build_tree_list (TREE_PURPOSE (t), val); return t; } /* Instantiate any dependent attributes in ATTRIBUTES, returning either it unchanged or a new TREE_LIST chain. */ static tree tsubst_attributes (tree attributes, tree args, tsubst_flags_t complain, tree in_decl) { tree last_dep = NULL_TREE; for (tree t = attributes; t; t = TREE_CHAIN (t)) if (ATTR_IS_DEPENDENT (t)) { last_dep = t; attributes = copy_list (attributes); break; } if (last_dep) for (tree *p = &attributes; *p; ) { tree t = *p; if (ATTR_IS_DEPENDENT (t)) { tree subst = tsubst_attribute (t, NULL, args, complain, in_decl); if (subst != t) { *p = subst; while (*p) p = &TREE_CHAIN (*p); *p = TREE_CHAIN (t); continue; } } p = &TREE_CHAIN (*p); } return attributes; } /* Apply any attributes which had to be deferred until instantiation time. DECL_P, ATTRIBUTES and ATTR_FLAGS are as cplus_decl_attributes; ARGS, COMPLAIN, IN_DECL are as tsubst. */ static void apply_late_template_attributes (tree *decl_p, tree attributes, int attr_flags, tree args, tsubst_flags_t complain, tree in_decl) { tree last_dep = NULL_TREE; tree t; tree *p; if (attributes == NULL_TREE) return; if (DECL_P (*decl_p)) { if (TREE_TYPE (*decl_p) == error_mark_node) return; p = &DECL_ATTRIBUTES (*decl_p); /* DECL_ATTRIBUTES comes from copy_node in tsubst_decl, and is identical to our attributes parameter. */ gcc_assert (*p == attributes); } else { p = &TYPE_ATTRIBUTES (*decl_p); /* TYPE_ATTRIBUTES was set up (with abi_tag and may_alias) in lookup_template_class_1, and should be preserved. */ gcc_assert (*p != attributes); while (*p) p = &TREE_CHAIN (*p); } for (t = attributes; t; t = TREE_CHAIN (t)) if (ATTR_IS_DEPENDENT (t)) { last_dep = t; attributes = copy_list (attributes); break; } *p = attributes; if (last_dep) { tree late_attrs = NULL_TREE; tree *q = &late_attrs; for (; *p; ) { t = *p; if (ATTR_IS_DEPENDENT (t)) { *p = TREE_CHAIN (t); TREE_CHAIN (t) = NULL_TREE; *q = tsubst_attribute (t, decl_p, args, complain, in_decl); while (*q) q = &TREE_CHAIN (*q); } else p = &TREE_CHAIN (t); } cplus_decl_attributes (decl_p, late_attrs, attr_flags); } } /* Perform (or defer) access check for typedefs that were referenced from within the template TMPL code. This is a subroutine of instantiate_decl and instantiate_class_template. TMPL is the template to consider and TARGS is the list of arguments of that template. 
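For example, given

  class A { typedef int type; };              // A::type is private
  template <class T> void f () { A::type x; }

the access to the private typedef A::type referenced inside the template is checked when f is instantiated, with the diagnostic pointing at the recorded location of the use.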
*/ static void perform_typedefs_access_check (tree tmpl, tree targs) { unsigned i; qualified_typedef_usage_t *iter; if (!tmpl || (!CLASS_TYPE_P (tmpl) && TREE_CODE (tmpl) != FUNCTION_DECL)) return; FOR_EACH_VEC_SAFE_ELT (get_types_needing_access_check (tmpl), i, iter) { tree type_decl = iter->typedef_decl; tree type_scope = iter->context; if (!type_decl || !type_scope || !CLASS_TYPE_P (type_scope)) continue; if (uses_template_parms (type_decl)) type_decl = tsubst (type_decl, targs, tf_error, NULL_TREE); if (uses_template_parms (type_scope)) type_scope = tsubst (type_scope, targs, tf_error, NULL_TREE); /* Make access check error messages point to the location of the use of the typedef. */ iloc_sentinel ils (iter->locus); perform_or_defer_access_check (TYPE_BINFO (type_scope), type_decl, type_decl, tf_warning_or_error); } } static tree instantiate_class_template_1 (tree type) { tree templ, args, pattern, t, member; tree typedecl; tree pbinfo; tree base_list; unsigned int saved_maximum_field_alignment; tree fn_context; if (type == error_mark_node) return error_mark_node; if (COMPLETE_OR_OPEN_TYPE_P (type) || uses_template_parms (type)) return type; /* Figure out which template is being instantiated. */ templ = most_general_template (CLASSTYPE_TI_TEMPLATE (type)); gcc_assert (TREE_CODE (templ) == TEMPLATE_DECL); /* Mark the type as in the process of being defined. */ TYPE_BEING_DEFINED (type) = 1; /* We may be in the middle of deferred access check. Disable it now. */ deferring_access_check_sentinel acs (dk_no_deferred); /* Determine what specialization of the original template to instantiate. */ t = most_specialized_partial_spec (type, tf_warning_or_error); if (t == error_mark_node) return error_mark_node; else if (t) { /* This TYPE is actually an instantiation of a partial specialization. We replace the innermost set of ARGS with the arguments appropriate for substitution. For example, given: template <class T> struct S {}; template <class T> struct S<T*> {}; and supposing that we are instantiating S<int*>, ARGS will presently be {int*} -- but we need {int}. */ pattern = TREE_TYPE (t); args = TREE_PURPOSE (t); } else { pattern = TREE_TYPE (templ); args = CLASSTYPE_TI_ARGS (type); } /* If the template we're instantiating is incomplete, then clearly there's nothing we can do. */ if (!COMPLETE_TYPE_P (pattern)) { /* We can try again later. */ TYPE_BEING_DEFINED (type) = 0; return type; } /* If we've recursively instantiated too many templates, stop. */ if (! push_tinst_level (type)) return type; int saved_unevaluated_operand = cp_unevaluated_operand; int saved_inhibit_evaluation_warnings = c_inhibit_evaluation_warnings; fn_context = decl_function_context (TYPE_MAIN_DECL (type)); /* Also avoid push_to_top_level for a lambda in an NSDMI. */ if (!fn_context && LAMBDA_TYPE_P (type) && TYPE_CLASS_SCOPE_P (type)) fn_context = error_mark_node; if (!fn_context) push_to_top_level (); else { cp_unevaluated_operand = 0; c_inhibit_evaluation_warnings = 0; } /* Use #pragma pack from the template context. */ saved_maximum_field_alignment = maximum_field_alignment; maximum_field_alignment = TYPE_PRECISION (pattern); SET_CLASSTYPE_INTERFACE_UNKNOWN (type); /* Set the input location to the most specialized template definition. This is needed if tsubsting causes an error. 
*/ typedecl = TYPE_MAIN_DECL (pattern); input_location = DECL_SOURCE_LOCATION (TYPE_NAME (type)) = DECL_SOURCE_LOCATION (typedecl); TYPE_PACKED (type) = TYPE_PACKED (pattern); SET_TYPE_ALIGN (type, TYPE_ALIGN (pattern)); TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (pattern); CLASSTYPE_NON_AGGREGATE (type) = CLASSTYPE_NON_AGGREGATE (pattern); if (ANON_AGGR_TYPE_P (pattern)) SET_ANON_AGGR_TYPE_P (type); if (CLASSTYPE_VISIBILITY_SPECIFIED (pattern)) { CLASSTYPE_VISIBILITY_SPECIFIED (type) = 1; CLASSTYPE_VISIBILITY (type) = CLASSTYPE_VISIBILITY (pattern); /* Adjust visibility for template arguments. */ determine_visibility (TYPE_MAIN_DECL (type)); } if (CLASS_TYPE_P (type)) CLASSTYPE_FINAL (type) = CLASSTYPE_FINAL (pattern); pbinfo = TYPE_BINFO (pattern); /* We should never instantiate a nested class before its enclosing class; we need to look up the nested class by name before we can instantiate it, and that lookup should instantiate the enclosing class. */ gcc_assert (!DECL_CLASS_SCOPE_P (TYPE_MAIN_DECL (pattern)) || COMPLETE_OR_OPEN_TYPE_P (TYPE_CONTEXT (type))); base_list = NULL_TREE; if (BINFO_N_BASE_BINFOS (pbinfo)) { tree pbase_binfo; tree pushed_scope; int i; /* We must enter the scope containing the type, as that is where the accessibility of types named in dependent bases are looked up from. */ pushed_scope = push_scope (CP_TYPE_CONTEXT (type)); /* Substitute into each of the bases to determine the actual basetypes. */ for (i = 0; BINFO_BASE_ITERATE (pbinfo, i, pbase_binfo); i++) { tree base; tree access = BINFO_BASE_ACCESS (pbinfo, i); tree expanded_bases = NULL_TREE; int idx, len = 1; if (PACK_EXPANSION_P (BINFO_TYPE (pbase_binfo))) { expanded_bases = tsubst_pack_expansion (BINFO_TYPE (pbase_binfo), args, tf_error, NULL_TREE); if (expanded_bases == error_mark_node) continue; len = TREE_VEC_LENGTH (expanded_bases); } for (idx = 0; idx < len; idx++) { if (expanded_bases) /* Extract the already-expanded base class. */ base = TREE_VEC_ELT (expanded_bases, idx); else /* Substitute to figure out the base class. */ base = tsubst (BINFO_TYPE (pbase_binfo), args, tf_error, NULL_TREE); if (base == error_mark_node) continue; base_list = tree_cons (access, base, base_list); if (BINFO_VIRTUAL_P (pbase_binfo)) TREE_TYPE (base_list) = integer_type_node; } } /* The list is now in reverse order; correct that. */ base_list = nreverse (base_list); if (pushed_scope) pop_scope (pushed_scope); } /* Now call xref_basetypes to set up all the base-class information. */ xref_basetypes (type, base_list); apply_late_template_attributes (&type, TYPE_ATTRIBUTES (pattern), (int) ATTR_FLAG_TYPE_IN_PLACE, args, tf_error, NULL_TREE); fixup_attribute_variants (type); /* Now that our base classes are set up, enter the scope of the class, so that name lookups into base classes, etc. will work correctly. This is precisely analogous to what we do in begin_class_definition when defining an ordinary non-template class, except we also need to push the enclosing classes. */ push_nested_class (type); /* Now members are processed in the order of declaration. */ for (member = CLASSTYPE_DECL_LIST (pattern); member; member = TREE_CHAIN (member)) { tree t = TREE_VALUE (member); if (TREE_PURPOSE (member)) { if (TYPE_P (t)) { if (LAMBDA_TYPE_P (t)) /* A closure type for a lambda in an NSDMI or default argument. Ignore it; it will be regenerated when needed. */ continue; /* Build new CLASSTYPE_NESTED_UTDS. 
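For example, the nested class and enumeration in

  template <class T> struct S
  {
    struct N { T t; };
    enum E { e };
  };

get their own tags created here when S is instantiated.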
*/ tree newtag; bool class_template_p; class_template_p = (TREE_CODE (t) != ENUMERAL_TYPE && TYPE_LANG_SPECIFIC (t) && CLASSTYPE_IS_TEMPLATE (t)); /* If the member is a class template, then -- even after substitution -- there may be dependent types in the template argument list for the class. We increment PROCESSING_TEMPLATE_DECL so that dependent_type_p, as that function will assume that no types are dependent when outside of a template. */ if (class_template_p) ++processing_template_decl; newtag = tsubst (t, args, tf_error, NULL_TREE); if (class_template_p) --processing_template_decl; if (newtag == error_mark_node) continue; if (TREE_CODE (newtag) != ENUMERAL_TYPE) { tree name = TYPE_IDENTIFIER (t); if (class_template_p) /* Unfortunately, lookup_template_class sets CLASSTYPE_IMPLICIT_INSTANTIATION for a partial instantiation (i.e., for the type of a member template class nested within a template class.) This behavior is required for maybe_process_partial_specialization to work correctly, but is not accurate in this case; the TAG is not an instantiation of anything. (The corresponding TEMPLATE_DECL is an instantiation, but the TYPE is not.) */ CLASSTYPE_USE_TEMPLATE (newtag) = 0; /* Now, we call pushtag to put this NEWTAG into the scope of TYPE. We first set up the IDENTIFIER_TYPE_VALUE to avoid pushtag calling push_template_decl. We don't have to do this for enums because it will already have been done in tsubst_enum. */ if (name) SET_IDENTIFIER_TYPE_VALUE (name, newtag); pushtag (name, newtag, /*tag_scope=*/ts_current); } } else if (DECL_DECLARES_FUNCTION_P (t)) { tree r; if (TREE_CODE (t) == TEMPLATE_DECL) ++processing_template_decl; r = tsubst (t, args, tf_error, NULL_TREE); if (TREE_CODE (t) == TEMPLATE_DECL) --processing_template_decl; set_current_access_from_decl (r); finish_member_declaration (r); /* Instantiate members marked with attribute used. */ if (r != error_mark_node && DECL_PRESERVE_P (r)) mark_used (r); if (TREE_CODE (r) == FUNCTION_DECL && DECL_OMP_DECLARE_REDUCTION_P (r)) cp_check_omp_declare_reduction (r); } else if ((DECL_CLASS_TEMPLATE_P (t) || DECL_IMPLICIT_TYPEDEF_P (t)) && LAMBDA_TYPE_P (TREE_TYPE (t))) /* A closure type for a lambda in an NSDMI or default argument. Ignore it; it will be regenerated when needed. */; else { /* Build new TYPE_FIELDS. */ if (TREE_CODE (t) == STATIC_ASSERT) { tree condition; ++c_inhibit_evaluation_warnings; condition = tsubst_expr (STATIC_ASSERT_CONDITION (t), args, tf_warning_or_error, NULL_TREE, /*integral_constant_expression_p=*/true); --c_inhibit_evaluation_warnings; finish_static_assert (condition, STATIC_ASSERT_MESSAGE (t), STATIC_ASSERT_SOURCE_LOCATION (t), /*member_p=*/true); } else if (TREE_CODE (t) != CONST_DECL) { tree r; tree vec = NULL_TREE; int len = 1; /* The file and line for this declaration, to assist in error message reporting. Since we called push_tinst_level above, we don't need to restore these. */ input_location = DECL_SOURCE_LOCATION (t); if (TREE_CODE (t) == TEMPLATE_DECL) ++processing_template_decl; r = tsubst (t, args, tf_warning_or_error, NULL_TREE); if (TREE_CODE (t) == TEMPLATE_DECL) --processing_template_decl; if (TREE_CODE (r) == TREE_VEC) { /* A capture pack became multiple fields. 
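This happens when a lambda captures a function parameter pack, e.g.

  template <class... Ts> auto f (Ts... args)
  {
    return [args...] () { };
  }

where each element of args... gets its own field in the closure type.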
*/ vec = r; len = TREE_VEC_LENGTH (vec); } for (int i = 0; i < len; ++i) { if (vec) r = TREE_VEC_ELT (vec, i); if (VAR_P (r)) { /* In [temp.inst]: [t]he initialization (and any associated side-effects) of a static data member does not occur unless the static data member is itself used in a way that requires the definition of the static data member to exist. Therefore, we do not substitute into the initialized for the static data member here. */ finish_static_data_member_decl (r, /*init=*/NULL_TREE, /*init_const_expr_p=*/false, /*asmspec_tree=*/NULL_TREE, /*flags=*/0); /* Instantiate members marked with attribute used. */ if (r != error_mark_node && DECL_PRESERVE_P (r)) mark_used (r); } else if (TREE_CODE (r) == FIELD_DECL) { /* Determine whether R has a valid type and can be completed later. If R is invalid, then its type is replaced by error_mark_node. */ tree rtype = TREE_TYPE (r); if (can_complete_type_without_circularity (rtype)) complete_type (rtype); if (!complete_or_array_type_p (rtype)) { /* If R's type couldn't be completed and it isn't a flexible array member (whose type is incomplete by definition) give an error. */ cxx_incomplete_type_error (r, rtype); TREE_TYPE (r) = error_mark_node; } else if (TREE_CODE (rtype) == ARRAY_TYPE && TYPE_DOMAIN (rtype) == NULL_TREE && (TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == QUAL_UNION_TYPE)) { error ("flexible array member %qD in union", r); TREE_TYPE (r) = error_mark_node; } else if (!verify_type_context (input_location, TCTX_FIELD, rtype)) TREE_TYPE (r) = error_mark_node; } /* If it is a TYPE_DECL for a class-scoped ENUMERAL_TYPE, such a thing will already have been added to the field list by tsubst_enum in finish_member_declaration in the CLASSTYPE_NESTED_UTDS case above. */ if (!(TREE_CODE (r) == TYPE_DECL && TREE_CODE (TREE_TYPE (r)) == ENUMERAL_TYPE && DECL_ARTIFICIAL (r))) { set_current_access_from_decl (r); finish_member_declaration (r); } } } } } else { if (TYPE_P (t) || DECL_CLASS_TEMPLATE_P (t) || DECL_TEMPLATE_TEMPLATE_PARM_P (t)) { /* Build new CLASSTYPE_FRIEND_CLASSES. */ tree friend_type = t; bool adjust_processing_template_decl = false; if (TREE_CODE (friend_type) == TEMPLATE_DECL) { /* template <class T> friend class C; */ friend_type = tsubst_friend_class (friend_type, args); adjust_processing_template_decl = true; } else if (TREE_CODE (friend_type) == UNBOUND_CLASS_TEMPLATE) { /* template <class T> friend class C::D; */ friend_type = tsubst (friend_type, args, tf_warning_or_error, NULL_TREE); if (TREE_CODE (friend_type) == TEMPLATE_DECL) friend_type = TREE_TYPE (friend_type); adjust_processing_template_decl = true; } else if (TREE_CODE (friend_type) == TYPENAME_TYPE || TREE_CODE (friend_type) == TEMPLATE_TYPE_PARM) { /* This could be either friend class T::C; when dependent_type_p is false or template <class U> friend class T::C; otherwise. */ /* Bump processing_template_decl in case this is something like template <class T> friend struct A<T>::B. */ ++processing_template_decl; friend_type = tsubst (friend_type, args, tf_warning_or_error, NULL_TREE); if (dependent_type_p (friend_type)) adjust_processing_template_decl = true; --processing_template_decl; } else if (TREE_CODE (friend_type) != BOUND_TEMPLATE_TEMPLATE_PARM && !CLASSTYPE_USE_TEMPLATE (friend_type) && TYPE_HIDDEN_P (friend_type)) { /* friend class C; where C hasn't been declared yet. Let's lookup name from namespace scope directly, bypassing any name that come from dependent base class. 
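For example, in

  template <class T> struct S : T
  {
    friend class C;   // no C has been declared yet
  };

C must be found (or injected) in the enclosing namespace; a type named C that the dependent base T might provide is not what the friend declaration refers to.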
*/ tree ns = decl_namespace_context (TYPE_MAIN_DECL (friend_type)); /* The call to xref_tag_from_type does injection for friend classes. */ push_nested_namespace (ns); friend_type = xref_tag_from_type (friend_type, NULL_TREE, /*tag_scope=*/ts_current); pop_nested_namespace (ns); } else if (uses_template_parms (friend_type)) /* friend class C<T>; */ friend_type = tsubst (friend_type, args, tf_warning_or_error, NULL_TREE); /* Otherwise it's friend class C; where C is already declared or friend class C<int>; We don't have to do anything in these cases. */ if (adjust_processing_template_decl) /* Trick make_friend_class into realizing that the friend we're adding is a template, not an ordinary class. It's important that we use make_friend_class since it will perform some error-checking and output cross-reference information. */ ++processing_template_decl; if (friend_type != error_mark_node) make_friend_class (type, friend_type, /*complain=*/false); if (adjust_processing_template_decl) --processing_template_decl; } else { /* Build new DECL_FRIENDLIST. */ tree r; /* The file and line for this declaration, to assist in error message reporting. Since we called push_tinst_level above, we don't need to restore these. */ input_location = DECL_SOURCE_LOCATION (t); if (TREE_CODE (t) == TEMPLATE_DECL) { ++processing_template_decl; push_deferring_access_checks (dk_no_check); } r = tsubst_friend_function (t, args); add_friend (type, r, /*complain=*/false); if (TREE_CODE (t) == TEMPLATE_DECL) { pop_deferring_access_checks (); --processing_template_decl; } } } } if (fn_context) { /* Restore these before substituting into the lambda capture initializers. */ cp_unevaluated_operand = saved_unevaluated_operand; c_inhibit_evaluation_warnings = saved_inhibit_evaluation_warnings; } /* Set the file and line number information to whatever is given for the class itself. This puts error messages involving generated implicit functions at a predictable point, and the same point that would be used for non-template classes. */ input_location = DECL_SOURCE_LOCATION (typedecl); unreverse_member_declarations (type); finish_struct_1 (type); TYPE_BEING_DEFINED (type) = 0; /* We don't instantiate default arguments for member functions. 14.7.1: The implicit instantiation of a class template specialization causes the implicit instantiation of the declarations, but not of the definitions or default arguments, of the class member functions, member classes, static data members and member templates.... */ /* Some typedefs referenced from within the template code need to be access checked at template instantiation time, i.e now. These types were added to the template at parsing time. Let's get those and perform the access checks then. */ perform_typedefs_access_check (pattern, args); perform_deferred_access_checks (tf_warning_or_error); pop_nested_class (); maximum_field_alignment = saved_maximum_field_alignment; if (!fn_context) pop_from_top_level (); pop_tinst_level (); /* The vtable for a template class can be emitted in any translation unit in which the class is instantiated. When there is no key method, however, finish_struct_1 will already have added TYPE to the keyed_classes. */ if (TYPE_CONTAINS_VPTR_P (type) && CLASSTYPE_KEY_METHOD (type)) vec_safe_push (keyed_classes, type); return type; } /* Wrapper for instantiate_class_template_1. 
*/ tree instantiate_class_template (tree type) { tree ret; timevar_push (TV_TEMPLATE_INST); ret = instantiate_class_template_1 (type); timevar_pop (TV_TEMPLATE_INST); return ret; } tree tsubst_template_arg (tree t, tree args, tsubst_flags_t complain, tree in_decl) { tree r; if (!t) r = t; else if (TYPE_P (t)) r = tsubst (t, args, complain, in_decl); else { if (!(complain & tf_warning)) ++c_inhibit_evaluation_warnings; r = tsubst_expr (t, args, complain, in_decl, /*integral_constant_expression_p=*/true); if (!(complain & tf_warning)) --c_inhibit_evaluation_warnings; } return r; } /* Given a function parameter pack TMPL_PARM and some function parameters instantiated from it at *SPEC_P, return a NONTYPE_ARGUMENT_PACK of them and set *SPEC_P to point at the next point in the list. */ tree extract_fnparm_pack (tree tmpl_parm, tree *spec_p) { /* Collect all of the extra "packed" parameters into an argument pack. */ tree argpack; tree spec_parm = *spec_p; int len; for (len = 0; spec_parm; ++len, spec_parm = TREE_CHAIN (spec_parm)) if (tmpl_parm && !function_parameter_expanded_from_pack_p (spec_parm, tmpl_parm)) break; spec_parm = *spec_p; if (len == 1 && DECL_PACK_P (spec_parm)) { /* The instantiation is still a parameter pack; don't wrap it in a NONTYPE_ARGUMENT_PACK. */ argpack = spec_parm; spec_parm = DECL_CHAIN (spec_parm); } else { /* Fill in PARMVEC with all of the parameters. */ tree parmvec = make_tree_vec (len); argpack = make_node (NONTYPE_ARGUMENT_PACK); for (int i = 0; i < len; i++) { tree elt = spec_parm; if (DECL_PACK_P (elt)) elt = make_pack_expansion (elt); TREE_VEC_ELT (parmvec, i) = elt; spec_parm = DECL_CHAIN (spec_parm); } /* Build the argument packs. */ SET_ARGUMENT_PACK_ARGS (argpack, parmvec); } *spec_p = spec_parm; return argpack; } /* Give a chain SPEC_PARM of PARM_DECLs, pack them into a NONTYPE_ARGUMENT_PACK. */ static tree make_fnparm_pack (tree spec_parm) { return extract_fnparm_pack (NULL_TREE, &spec_parm); } /* Return 1 if the Ith element of the argument pack ARG_PACK is a pack expansion with no extra args, 2 if it has extra args, or 0 if it is not a pack expansion. */ static int argument_pack_element_is_expansion_p (tree arg_pack, int i) { if (TREE_CODE (arg_pack) == ARGUMENT_PACK_SELECT) /* We're being called before this happens in tsubst_pack_expansion. */ arg_pack = ARGUMENT_PACK_SELECT_FROM_PACK (arg_pack); tree vec = ARGUMENT_PACK_ARGS (arg_pack); if (i >= TREE_VEC_LENGTH (vec)) return 0; tree elt = TREE_VEC_ELT (vec, i); if (DECL_P (elt)) /* A decl pack is itself an expansion. */ elt = TREE_TYPE (elt); if (!PACK_EXPANSION_P (elt)) return 0; if (PACK_EXPANSION_EXTRA_ARGS (elt)) return 2; return 1; } /* Creates and return an ARGUMENT_PACK_SELECT tree node. */ static tree make_argument_pack_select (tree arg_pack, unsigned index) { tree aps = make_node (ARGUMENT_PACK_SELECT); ARGUMENT_PACK_SELECT_FROM_PACK (aps) = arg_pack; ARGUMENT_PACK_SELECT_INDEX (aps) = index; return aps; } /* This is a subroutine of tsubst_pack_expansion. It returns TRUE if we need to use the PACK_EXPANSION_EXTRA_ARGS mechanism to store the (non complete list of) arguments of the substitution and return a non substituted pack expansion, in order to wait for when we have enough arguments to really perform the substitution. 
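Roughly speaking, this is the case when the packs cannot yet be walked in lockstep: for instance, one parameter pack already has a concrete argument pack while another still expands to a further pack expansion, or one pack is empty while another is not.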
*/ static bool use_pack_expansion_extra_args_p (tree parm_packs, int arg_pack_len, bool has_empty_arg) { /* If one pack has an expansion and another pack has a normal argument or if one pack has an empty argument and another one hasn't, then tsubst_pack_expansion cannot perform the substitution and needs to fall back on the PACK_EXPANSION_EXTRA mechanism. */ if (parm_packs == NULL_TREE) return false; else if (has_empty_arg) { /* If all the actual packs are pack expansions, we can still substitute directly. */ for (tree p = parm_packs; p; p = TREE_CHAIN (p)) { tree a = TREE_VALUE (p); if (TREE_CODE (a) == ARGUMENT_PACK_SELECT) a = ARGUMENT_PACK_SELECT_FROM_PACK (a); a = ARGUMENT_PACK_ARGS (a); if (TREE_VEC_LENGTH (a) == 1) a = TREE_VEC_ELT (a, 0); if (PACK_EXPANSION_P (a)) continue; return true; } return false; } bool has_expansion_arg = false; for (int i = 0 ; i < arg_pack_len; ++i) { bool has_non_expansion_arg = false; for (tree parm_pack = parm_packs; parm_pack; parm_pack = TREE_CHAIN (parm_pack)) { tree arg = TREE_VALUE (parm_pack); int exp = argument_pack_element_is_expansion_p (arg, i); if (exp == 2) /* We can't substitute a pack expansion with extra args into our pattern. */ return true; else if (exp) has_expansion_arg = true; else has_non_expansion_arg = true; } if (has_expansion_arg && has_non_expansion_arg) return true; } return false; } /* [temp.variadic]/6 says that: The instantiation of a pack expansion [...] produces a list E1, E2, ..., En, where N is the number of elements in the pack expansion parameters. This subroutine of tsubst_pack_expansion produces one of these Ei. PATTERN is the pattern of the pack expansion. PARM_PACKS is a TREE_LIST in which each TREE_PURPOSE is a parameter pack of PATTERN, and each TREE_VALUE is its corresponding argument pack. INDEX is the index 'i' of the element Ei to produce. ARGS, COMPLAIN, and IN_DECL are the same parameters as for the tsubst_pack_expansion function. The function returns the resulting Ei upon successful completion, or error_mark_node. Note that this function possibly modifies the ARGS parameter, so it's the responsibility of the caller to restore it. */ static tree gen_elem_of_pack_expansion_instantiation (tree pattern, tree parm_packs, unsigned index, tree args /* This parm gets modified. */, tsubst_flags_t complain, tree in_decl) { tree t; bool ith_elem_is_expansion = false; /* For each parameter pack, change the substitution of the parameter pack to the ith argument in its argument pack, then expand the pattern. */ for (tree pack = parm_packs; pack; pack = TREE_CHAIN (pack)) { tree parm = TREE_PURPOSE (pack); tree arg_pack = TREE_VALUE (pack); tree aps; /* instance of ARGUMENT_PACK_SELECT. */ ith_elem_is_expansion |= argument_pack_element_is_expansion_p (arg_pack, index); /* Select the Ith argument from the pack. */ if (TREE_CODE (parm) == PARM_DECL || VAR_P (parm) || TREE_CODE (parm) == FIELD_DECL) { if (index == 0) { aps = make_argument_pack_select (arg_pack, index); if (!mark_used (parm, complain) && !(complain & tf_error)) return error_mark_node; register_local_specialization (aps, parm); } else aps = retrieve_local_specialization (parm); } else { int idx, level; template_parm_level_and_index (parm, &level, &idx); if (index == 0) { aps = make_argument_pack_select (arg_pack, index); /* Update the corresponding argument. */ TMPL_ARG (args, level, idx) = aps; } else /* Re-use the ARGUMENT_PACK_SELECT.
*/ aps = TMPL_ARG (args, level, idx); } ARGUMENT_PACK_SELECT_INDEX (aps) = index; } /* Substitute into the PATTERN with the (possibly altered) arguments. */ if (pattern == in_decl) /* Expanding a fixed parameter pack from coerce_template_parameter_pack. */ t = tsubst_decl (pattern, args, complain); else if (pattern == error_mark_node) t = error_mark_node; else if (!TYPE_P (pattern)) t = tsubst_expr (pattern, args, complain, in_decl, /*integral_constant_expression_p=*/false); else t = tsubst (pattern, args, complain, in_decl); /* If the Ith argument pack element is a pack expansion, then the Ith element resulting from the substituting is going to be a pack expansion as well. */ if (ith_elem_is_expansion) t = make_pack_expansion (t, complain); return t; } /* When the unexpanded parameter pack in a fold expression expands to an empty sequence, the value of the expression is as follows; the program is ill-formed if the operator is not listed in this table. && true || false , void() */ tree expand_empty_fold (tree t, tsubst_flags_t complain) { tree_code code = (tree_code)TREE_INT_CST_LOW (TREE_OPERAND (t, 0)); if (!FOLD_EXPR_MODIFY_P (t)) switch (code) { case TRUTH_ANDIF_EXPR: return boolean_true_node; case TRUTH_ORIF_EXPR: return boolean_false_node; case COMPOUND_EXPR: return void_node; default: break; } if (complain & tf_error) error_at (location_of (t), "fold of empty expansion over %O", code); return error_mark_node; } /* Given a fold-expression T and a current LEFT and RIGHT operand, form an expression that combines the two terms using the operator of T. */ static tree fold_expression (tree t, tree left, tree right, tsubst_flags_t complain) { tree op = FOLD_EXPR_OP (t); tree_code code = (tree_code)TREE_INT_CST_LOW (op); // Handle compound assignment operators. if (FOLD_EXPR_MODIFY_P (t)) return build_x_modify_expr (input_location, left, code, right, complain); warning_sentinel s(warn_parentheses); switch (code) { case COMPOUND_EXPR: return build_x_compound_expr (input_location, left, right, complain); default: return build_x_binary_op (input_location, code, left, TREE_CODE (left), right, TREE_CODE (right), /*overload=*/NULL, complain); } } /* Substitute ARGS into the pack of a fold expression T. */ static inline tree tsubst_fold_expr_pack (tree t, tree args, tsubst_flags_t complain, tree in_decl) { return tsubst_pack_expansion (FOLD_EXPR_PACK (t), args, complain, in_decl); } /* Substitute ARGS into the pack of a fold expression T. */ static inline tree tsubst_fold_expr_init (tree t, tree args, tsubst_flags_t complain, tree in_decl) { return tsubst_expr (FOLD_EXPR_INIT (t), args, complain, in_decl, false); } /* Expand a PACK of arguments into a grouped as left fold. Given a pack containing elements A0, A1, ..., An and an operator @, this builds the expression: ((A0 @ A1) @ A2) ... @ An Note that PACK must not be empty. The operator is defined by the original fold expression T. */ static tree expand_left_fold (tree t, tree pack, tsubst_flags_t complain) { tree left = TREE_VEC_ELT (pack, 0); for (int i = 1; i < TREE_VEC_LENGTH (pack); ++i) { tree right = TREE_VEC_ELT (pack, i); left = fold_expression (t, left, right, complain); } return left; } /* Substitute into a unary left fold expression. 
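For example, for a pack bs containing b0, b1 and b2, the unary left fold (... && bs) expands to (b0 && b1) && b2; an empty pack is handled by expand_empty_fold above and yields true.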
*/ static tree tsubst_unary_left_fold (tree t, tree args, tsubst_flags_t complain, tree in_decl) { tree pack = tsubst_fold_expr_pack (t, args, complain, in_decl); if (pack == error_mark_node) return error_mark_node; if (PACK_EXPANSION_P (pack)) { tree r = copy_node (t); FOLD_EXPR_PACK (r) = pack; return r; } if (TREE_VEC_LENGTH (pack) == 0) return expand_empty_fold (t, complain); else return expand_left_fold (t, pack, complain); } /* Substitute into a binary left fold expression. Do this by building a single (non-empty) vector of arguments and building the expression from those elements. */ static tree tsubst_binary_left_fold (tree t, tree args, tsubst_flags_t complain, tree in_decl) { tree pack = tsubst_fold_expr_pack (t, args, complain, in_decl); if (pack == error_mark_node) return error_mark_node; tree init = tsubst_fold_expr_init (t, args, complain, in_decl); if (init == error_mark_node) return error_mark_node; if (PACK_EXPANSION_P (pack)) { tree r = copy_node (t); FOLD_EXPR_PACK (r) = pack; FOLD_EXPR_INIT (r) = init; return r; } tree vec = make_tree_vec (TREE_VEC_LENGTH (pack) + 1); TREE_VEC_ELT (vec, 0) = init; for (int i = 0; i < TREE_VEC_LENGTH (pack); ++i) TREE_VEC_ELT (vec, i + 1) = TREE_VEC_ELT (pack, i); return expand_left_fold (t, vec, complain); } /* Expand a PACK of arguments into a grouped right fold. Given a pack containing elements A0, A1, ..., An and an operator @, this builds the expression: A0 @ ... (An-2 @ (An-1 @ An)) Note that PACK must not be empty. The operator is defined by the original fold expression T. */ tree expand_right_fold (tree t, tree pack, tsubst_flags_t complain) { // Build the expression. int n = TREE_VEC_LENGTH (pack); tree right = TREE_VEC_ELT (pack, n - 1); for (--n; n != 0; --n) { tree left = TREE_VEC_ELT (pack, n - 1); right = fold_expression (t, left, right, complain); } return right; } /* Substitute into a unary right fold expression. */ static tree tsubst_unary_right_fold (tree t, tree args, tsubst_flags_t complain, tree in_decl) { tree pack = tsubst_fold_expr_pack (t, args, complain, in_decl); if (pack == error_mark_node) return error_mark_node; if (PACK_EXPANSION_P (pack)) { tree r = copy_node (t); FOLD_EXPR_PACK (r) = pack; return r; } if (TREE_VEC_LENGTH (pack) == 0) return expand_empty_fold (t, complain); else return expand_right_fold (t, pack, complain); } /* Substitute into a binary right fold expression. Do this by building a single (non-empty) vector of arguments and building the expression from those elements. */ static tree tsubst_binary_right_fold (tree t, tree args, tsubst_flags_t complain, tree in_decl) { tree pack = tsubst_fold_expr_pack (t, args, complain, in_decl); if (pack == error_mark_node) return error_mark_node; tree init = tsubst_fold_expr_init (t, args, complain, in_decl); if (init == error_mark_node) return error_mark_node; if (PACK_EXPANSION_P (pack)) { tree r = copy_node (t); FOLD_EXPR_PACK (r) = pack; FOLD_EXPR_INIT (r) = init; return r; } int n = TREE_VEC_LENGTH (pack); tree vec = make_tree_vec (n + 1); for (int i = 0; i < n; ++i) TREE_VEC_ELT (vec, i) = TREE_VEC_ELT (pack, i); TREE_VEC_ELT (vec, n) = init; return expand_right_fold (t, vec, complain); } /* Walk through the pattern of a pack expansion, adding everything in local_specializations to a list.
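For instance, in a call such as g ((ts + i)...) inside a function template, the pattern (ts + i) refers to the local variable i; its current local specialization has to be remembered so that a later, fuller substitution of the expansion still finds it.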
*/ class el_data { public: hash_set<tree> internal; tree extra; tsubst_flags_t complain; el_data (tsubst_flags_t c) : extra (NULL_TREE), complain (c) {} }; static tree extract_locals_r (tree *tp, int */*walk_subtrees*/, void *data_) { el_data &data = *reinterpret_cast<el_data*>(data_); tree *extra = &data.extra; tsubst_flags_t complain = data.complain; if (TYPE_P (*tp) && typedef_variant_p (*tp)) /* Remember local typedefs (85214). */ tp = &TYPE_NAME (*tp); if (TREE_CODE (*tp) == DECL_EXPR) data.internal.add (DECL_EXPR_DECL (*tp)); else if (tree spec = retrieve_local_specialization (*tp)) { if (data.internal.contains (*tp)) /* Don't mess with variables declared within the pattern. */ return NULL_TREE; if (TREE_CODE (spec) == NONTYPE_ARGUMENT_PACK) { /* Maybe pull out the PARM_DECL for a partial instantiation. */ tree args = ARGUMENT_PACK_ARGS (spec); if (TREE_VEC_LENGTH (args) == 1) { tree elt = TREE_VEC_ELT (args, 0); if (PACK_EXPANSION_P (elt)) elt = PACK_EXPANSION_PATTERN (elt); if (DECL_PACK_P (elt)) spec = elt; } if (TREE_CODE (spec) == NONTYPE_ARGUMENT_PACK) { /* Handle lambda capture here, since we aren't doing any substitution now, and so tsubst_copy won't call process_outer_var_ref. */ tree args = ARGUMENT_PACK_ARGS (spec); int len = TREE_VEC_LENGTH (args); for (int i = 0; i < len; ++i) { tree arg = TREE_VEC_ELT (args, i); tree carg = arg; if (outer_automatic_var_p (arg)) carg = process_outer_var_ref (arg, complain); if (carg != arg) { /* Make a new NONTYPE_ARGUMENT_PACK of the capture proxies. */ if (i == 0) { spec = copy_node (spec); args = copy_node (args); SET_ARGUMENT_PACK_ARGS (spec, args); register_local_specialization (spec, *tp); } TREE_VEC_ELT (args, i) = carg; } } } } if (outer_automatic_var_p (spec)) spec = process_outer_var_ref (spec, complain); *extra = tree_cons (*tp, spec, *extra); } return NULL_TREE; } static tree extract_local_specs (tree pattern, tsubst_flags_t complain) { el_data data (complain); cp_walk_tree_without_duplicates (&pattern, extract_locals_r, &data); return data.extra; } /* Extract any uses of local_specializations from PATTERN and add them to ARGS for use in PACK_EXPANSION_EXTRA_ARGS. */ tree build_extra_args (tree pattern, tree args, tsubst_flags_t complain) { tree extra = args; if (local_specializations) if (tree locals = extract_local_specs (pattern, complain)) extra = tree_cons (NULL_TREE, extra, locals); return extra; } /* Apply any local specializations from PACK_EXPANSION_EXTRA_ARGS and add the normal template args to ARGS. */ tree add_extra_args (tree extra, tree args) { if (extra && TREE_CODE (extra) == TREE_LIST) { for (tree elt = TREE_CHAIN (extra); elt; elt = TREE_CHAIN (elt)) { /* The partial instantiation involved local declarations collected in extract_local_specs; map from the general template to our local context. */ tree gen = TREE_PURPOSE (elt); tree inst = TREE_VALUE (elt); if (DECL_P (inst)) if (tree local = retrieve_local_specialization (inst)) inst = local; /* else inst is already a full instantiation of the pack. */ register_local_specialization (inst, gen); } gcc_assert (!TREE_PURPOSE (extra)); extra = TREE_VALUE (extra); } #if 1 /* I think we should always be able to substitute dependent args into the pattern. If that turns out to be incorrect in some cases, enable the alternate code (and add complain/in_decl parms to this function). 
*/ gcc_checking_assert (!uses_template_parms (extra)); #else if (!uses_template_parms (extra)) { gcc_unreachable (); extra = tsubst_template_args (extra, args, complain, in_decl); args = add_outermost_template_args (args, extra); } else #endif args = add_to_template_args (extra, args); return args; } /* Substitute ARGS into T, which is an pack expansion (i.e. TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION). Returns a TREE_VEC with the substituted arguments, a PACK_EXPANSION_* node (if only a partial substitution could be performed) or ERROR_MARK_NODE if there was an error. */ tree tsubst_pack_expansion (tree t, tree args, tsubst_flags_t complain, tree in_decl) { tree pattern; tree pack, packs = NULL_TREE; bool unsubstituted_packs = false; int i, len = -1; tree result; bool need_local_specializations = false; int levels; gcc_assert (PACK_EXPANSION_P (t)); pattern = PACK_EXPANSION_PATTERN (t); /* Add in any args remembered from an earlier partial instantiation. */ args = add_extra_args (PACK_EXPANSION_EXTRA_ARGS (t), args); levels = TMPL_ARGS_DEPTH (args); /* Determine the argument packs that will instantiate the parameter packs used in the expansion expression. While we're at it, compute the number of arguments to be expanded and make sure it is consistent. */ for (pack = PACK_EXPANSION_PARAMETER_PACKS (t); pack; pack = TREE_CHAIN (pack)) { tree parm_pack = TREE_VALUE (pack); tree arg_pack = NULL_TREE; tree orig_arg = NULL_TREE; int level = 0; if (TREE_CODE (parm_pack) == BASES) { gcc_assert (parm_pack == pattern); if (BASES_DIRECT (parm_pack)) return calculate_direct_bases (tsubst_expr (BASES_TYPE (parm_pack), args, complain, in_decl, false), complain); else return calculate_bases (tsubst_expr (BASES_TYPE (parm_pack), args, complain, in_decl, false), complain); } else if (builtin_pack_call_p (parm_pack)) { if (parm_pack != pattern) { if (complain & tf_error) sorry ("%qE is not the entire pattern of the pack expansion", parm_pack); return error_mark_node; } return expand_builtin_pack_call (parm_pack, args, complain, in_decl); } else if (TREE_CODE (parm_pack) == PARM_DECL) { /* We know we have correct local_specializations if this expansion is at function scope, or if we're dealing with a local parameter in a requires expression; for the latter, tsubst_requires_expr set it up appropriately. */ if (PACK_EXPANSION_LOCAL_P (t) || CONSTRAINT_VAR_P (parm_pack)) arg_pack = retrieve_local_specialization (parm_pack); else /* We can't rely on local_specializations for a parameter name used later in a function declaration (such as in a late-specified return type). Even if it exists, it might have the wrong value for a recursive call. */ need_local_specializations = true; if (!arg_pack) { /* This parameter pack was used in an unevaluated context. Just make a dummy decl, since it's only used for its type. */ ++cp_unevaluated_operand; arg_pack = tsubst_decl (parm_pack, args, complain); --cp_unevaluated_operand; if (arg_pack && DECL_PACK_P (arg_pack)) /* Partial instantiation of the parm_pack, we can't build up an argument pack yet. */ arg_pack = NULL_TREE; else arg_pack = make_fnparm_pack (arg_pack); } else if (DECL_PACK_P (arg_pack)) /* This argument pack isn't fully instantiated yet. 
*/ arg_pack = NULL_TREE; } else if (is_capture_proxy (parm_pack)) { arg_pack = retrieve_local_specialization (parm_pack); if (DECL_PACK_P (arg_pack)) arg_pack = NULL_TREE; } else { int idx; template_parm_level_and_index (parm_pack, &level, &idx); if (level <= levels) arg_pack = TMPL_ARG (args, level, idx); if (arg_pack && TREE_CODE (arg_pack) == TEMPLATE_TYPE_PARM && TEMPLATE_TYPE_PARAMETER_PACK (arg_pack)) arg_pack = NULL_TREE; } orig_arg = arg_pack; if (arg_pack && TREE_CODE (arg_pack) == ARGUMENT_PACK_SELECT) arg_pack = ARGUMENT_PACK_SELECT_FROM_PACK (arg_pack); if (arg_pack && !ARGUMENT_PACK_P (arg_pack)) /* This can only happen if we forget to expand an argument pack somewhere else. Just return an error, silently. */ { result = make_tree_vec (1); TREE_VEC_ELT (result, 0) = error_mark_node; return result; } if (arg_pack) { int my_len = TREE_VEC_LENGTH (ARGUMENT_PACK_ARGS (arg_pack)); /* Don't bother trying to do a partial substitution with incomplete packs; we'll try again after deduction. */ if (ARGUMENT_PACK_INCOMPLETE_P (arg_pack)) return t; if (len < 0) len = my_len; else if (len != my_len) { if (!(complain & tf_error)) /* Fail quietly. */; else if (TREE_CODE (t) == TYPE_PACK_EXPANSION) error ("mismatched argument pack lengths while expanding %qT", pattern); else error ("mismatched argument pack lengths while expanding %qE", pattern); return error_mark_node; } /* Keep track of the parameter packs and their corresponding argument packs. */ packs = tree_cons (parm_pack, arg_pack, packs); TREE_TYPE (packs) = orig_arg; } else { /* We can't substitute for this parameter pack. We use a flag as well as the missing_level counter because function parameter packs don't have a level. */ gcc_assert (processing_template_decl || is_auto (parm_pack)); unsubstituted_packs = true; } } /* If the expansion is just T..., return the matching argument pack, unless we need to call convert_from_reference on all the elements. This is an important optimization; see c++/68422. */ if (!unsubstituted_packs && TREE_PURPOSE (packs) == pattern) { tree args = ARGUMENT_PACK_ARGS (TREE_VALUE (packs)); /* If the argument pack is a single pack expansion, pull it out. */ if (TREE_VEC_LENGTH (args) == 1 && pack_expansion_args_count (args)) return TREE_VEC_ELT (args, 0); /* Types need no adjustment, nor does sizeof..., and if we still have some pack expansion args we won't do anything yet. */ if (TREE_CODE (t) == TYPE_PACK_EXPANSION || PACK_EXPANSION_SIZEOF_P (t) || pack_expansion_args_count (args)) return args; /* Also optimize expression pack expansions if we can tell that the elements won't have reference type. */ tree type = TREE_TYPE (pattern); if (type && !TYPE_REF_P (type) && !PACK_EXPANSION_P (type) && !WILDCARD_TYPE_P (type)) return args; /* Otherwise use the normal path so we get convert_from_reference. */ } /* We cannot expand this expansion expression, because we don't have all of the argument packs we need. */ if (use_pack_expansion_extra_args_p (packs, len, unsubstituted_packs)) { /* We got some full packs, but we can't substitute them in until we have values for all the packs. So remember these until then. */ t = make_pack_expansion (pattern, complain); PACK_EXPANSION_EXTRA_ARGS (t) = build_extra_args (pattern, args, complain); return t; } /* If NEED_LOCAL_SPECIALIZATIONS then we're in a late-specified return type, so create our own local specializations map; the current map is either NULL or (in the case of recursive unification) might have bindings that we don't want to use or alter. 
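   A sketch of the kind of declaration that needs this (illustrative only;
   "g" is a placeholder):

     template <class... T>
     auto f (T... t) -> decltype (g (t...));

   Here "t..." in the late-specified return type names the function
   parameters, but we may be substituting into it when the enclosing
   function's own bindings are not (or not correctly) present in
   local_specializations, so a fresh map is pushed.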
*/ local_specialization_stack lss (need_local_specializations ? lss_blank : lss_nop); if (unsubstituted_packs) { /* There were no real arguments, we're just replacing a parameter pack with another version of itself. Substitute into the pattern and return a PACK_EXPANSION_*. The caller will need to deal with that. */ if (TREE_CODE (t) == EXPR_PACK_EXPANSION) t = tsubst_expr (pattern, args, complain, in_decl, /*integral_constant_expression_p=*/false); else t = tsubst (pattern, args, complain, in_decl); t = make_pack_expansion (t, complain); return t; } gcc_assert (len >= 0); /* For each argument in each argument pack, substitute into the pattern. */ result = make_tree_vec (len); tree elem_args = copy_template_args (args); for (i = 0; i < len; ++i) { t = gen_elem_of_pack_expansion_instantiation (pattern, packs, i, elem_args, complain, in_decl); TREE_VEC_ELT (result, i) = t; if (t == error_mark_node) { result = error_mark_node; break; } } /* Update ARGS to restore the substitution from parameter packs to their argument packs. */ for (pack = packs; pack; pack = TREE_CHAIN (pack)) { tree parm = TREE_PURPOSE (pack); if (TREE_CODE (parm) == PARM_DECL || VAR_P (parm) || TREE_CODE (parm) == FIELD_DECL) register_local_specialization (TREE_TYPE (pack), parm); else { int idx, level; if (TREE_VALUE (pack) == NULL_TREE) continue; template_parm_level_and_index (parm, &level, &idx); /* Update the corresponding argument. */ if (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (args)) TREE_VEC_ELT (TREE_VEC_ELT (args, level -1 ), idx) = TREE_TYPE (pack); else TREE_VEC_ELT (args, idx) = TREE_TYPE (pack); } } /* If the dependent pack arguments were such that we end up with only a single pack expansion again, there's no need to keep it in a TREE_VEC. */ if (len == 1 && TREE_CODE (result) == TREE_VEC && PACK_EXPANSION_P (TREE_VEC_ELT (result, 0))) return TREE_VEC_ELT (result, 0); return result; } /* Given PARM_DECL PARM, find the corresponding PARM_DECL in the template TMPL. We do this using DECL_PARM_INDEX, which should work even with parameter packs; all parms generated from a function parameter pack will have the same DECL_PARM_INDEX. */ tree get_pattern_parm (tree parm, tree tmpl) { tree pattern = DECL_TEMPLATE_RESULT (tmpl); tree patparm; if (DECL_ARTIFICIAL (parm)) { for (patparm = DECL_ARGUMENTS (pattern); patparm; patparm = DECL_CHAIN (patparm)) if (DECL_ARTIFICIAL (patparm) && DECL_NAME (parm) == DECL_NAME (patparm)) break; } else { patparm = FUNCTION_FIRST_USER_PARM (DECL_TEMPLATE_RESULT (tmpl)); patparm = chain_index (DECL_PARM_INDEX (parm)-1, patparm); gcc_assert (DECL_PARM_INDEX (patparm) == DECL_PARM_INDEX (parm)); } return patparm; } /* Make an argument pack out of the TREE_VEC VEC. */ static tree make_argument_pack (tree vec) { tree pack; tree elt = TREE_VEC_ELT (vec, 0); if (TYPE_P (elt)) pack = cxx_make_type (TYPE_ARGUMENT_PACK); else { pack = make_node (NONTYPE_ARGUMENT_PACK); TREE_CONSTANT (pack) = 1; } SET_ARGUMENT_PACK_ARGS (pack, vec); return pack; } /* Return an exact copy of template args T that can be modified independently. */ static tree copy_template_args (tree t) { if (t == error_mark_node) return t; int len = TREE_VEC_LENGTH (t); tree new_vec = make_tree_vec (len); for (int i = 0; i < len; ++i) { tree elt = TREE_VEC_ELT (t, i); if (elt && TREE_CODE (elt) == TREE_VEC) elt = copy_template_args (elt); TREE_VEC_ELT (new_vec, i) = elt; } NON_DEFAULT_TEMPLATE_ARGS_COUNT (new_vec) = NON_DEFAULT_TEMPLATE_ARGS_COUNT (t); return new_vec; } /* Substitute ARGS into the *_ARGUMENT_PACK orig_arg. 
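   For example (informal; "tuple" stands for any variadic class template):
   inside "template <class T> struct S", the use tuple<T*, int> binds
   tuple's parameter pack to the TYPE_ARGUMENT_PACK {T*, int}; substituting
   T = char here produces a new argument pack holding {char*, int}.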
*/ tree tsubst_argument_pack (tree orig_arg, tree args, tsubst_flags_t complain, tree in_decl) { /* Substitute into each of the arguments. */ tree new_arg = TYPE_P (orig_arg) ? cxx_make_type (TREE_CODE (orig_arg)) : make_node (TREE_CODE (orig_arg)); tree pack_args = tsubst_template_args (ARGUMENT_PACK_ARGS (orig_arg), args, complain, in_decl); if (pack_args == error_mark_node) new_arg = error_mark_node; else SET_ARGUMENT_PACK_ARGS (new_arg, pack_args); if (TREE_CODE (new_arg) == NONTYPE_ARGUMENT_PACK) TREE_CONSTANT (new_arg) = TREE_CONSTANT (orig_arg); return new_arg; } /* Substitute ARGS into the vector or list of template arguments T. */ tree tsubst_template_args (tree t, tree args, tsubst_flags_t complain, tree in_decl) { tree orig_t = t; int len, need_new = 0, i, expanded_len_adjust = 0, out; tree *elts; if (t == error_mark_node) return error_mark_node; len = TREE_VEC_LENGTH (t); elts = XALLOCAVEC (tree, len); for (i = 0; i < len; i++) { tree orig_arg = TREE_VEC_ELT (t, i); tree new_arg; if (TREE_CODE (orig_arg) == TREE_VEC) new_arg = tsubst_template_args (orig_arg, args, complain, in_decl); else if (PACK_EXPANSION_P (orig_arg)) { /* Substitute into an expansion expression. */ new_arg = tsubst_pack_expansion (orig_arg, args, complain, in_decl); if (TREE_CODE (new_arg) == TREE_VEC) /* Add to the expanded length adjustment the number of expanded arguments. We subtract one from this measurement, because the argument pack expression itself is already counted as 1 in LEN. EXPANDED_LEN_ADJUST can actually be negative, if the argument pack is empty. */ expanded_len_adjust += TREE_VEC_LENGTH (new_arg) - 1; } else if (ARGUMENT_PACK_P (orig_arg)) new_arg = tsubst_argument_pack (orig_arg, args, complain, in_decl); else new_arg = tsubst_template_arg (orig_arg, args, complain, in_decl); if (new_arg == error_mark_node) return error_mark_node; elts[i] = new_arg; if (new_arg != orig_arg) need_new = 1; } if (!need_new) return t; /* Make space for the expanded arguments coming from template argument packs. */ t = make_tree_vec (len + expanded_len_adjust); /* ORIG_T can contain TREE_VECs. That happens if ORIG_T contains the arguments for a member template. In that case each TREE_VEC in ORIG_T represents a level of template arguments, and ORIG_T won't carry any non defaulted argument count. It will rather be the nested TREE_VECs that will carry one. In other words, ORIG_T carries a non defaulted argument count only if it doesn't contain any nested TREE_VEC. */ if (NON_DEFAULT_TEMPLATE_ARGS_COUNT (orig_t)) { int count = GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (orig_t); count += expanded_len_adjust; SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (t, count); } for (i = 0, out = 0; i < len; i++) { if ((PACK_EXPANSION_P (TREE_VEC_ELT (orig_t, i)) || ARGUMENT_PACK_P (TREE_VEC_ELT (orig_t, i))) && TREE_CODE (elts[i]) == TREE_VEC) { int idx; /* Now expand the template argument pack "in place". */ for (idx = 0; idx < TREE_VEC_LENGTH (elts[i]); idx++, out++) TREE_VEC_ELT (t, out) = TREE_VEC_ELT (elts[i], idx); } else { TREE_VEC_ELT (t, out) = elts[i]; out++; } } return t; } /* Substitute ARGS into one level PARMS of template parameters. 
*/ static tree tsubst_template_parms_level (tree parms, tree args, tsubst_flags_t complain) { if (parms == error_mark_node) return error_mark_node; tree new_vec = make_tree_vec (TREE_VEC_LENGTH (parms)); for (int i = 0; i < TREE_VEC_LENGTH (new_vec); ++i) { tree tuple = TREE_VEC_ELT (parms, i); if (tuple == error_mark_node) continue; TREE_VEC_ELT (new_vec, i) = tsubst_template_parm (tuple, args, complain); } return new_vec; } /* Return the result of substituting ARGS into the template parameters given by PARMS. If there are m levels of ARGS and m + n levels of PARMS, then the result will contain n levels of PARMS. For example, if PARMS is `template <class T> template <class U> template <T*, U, class V>' and ARGS is {{int}, {double}} then the result will be `template <int*, double, class V>'. */ static tree tsubst_template_parms (tree parms, tree args, tsubst_flags_t complain) { tree r = NULL_TREE; tree* new_parms; /* When substituting into a template, we must set PROCESSING_TEMPLATE_DECL as the template parameters may be dependent if they are based on one-another, and the dependency predicates are short-circuit outside of templates. */ ++processing_template_decl; for (new_parms = &r; parms && TMPL_PARMS_DEPTH (parms) > TMPL_ARGS_DEPTH (args); new_parms = &(TREE_CHAIN (*new_parms)), parms = TREE_CHAIN (parms)) { tree new_vec = tsubst_template_parms_level (TREE_VALUE (parms), args, complain); *new_parms = tree_cons (size_int (TMPL_PARMS_DEPTH (parms) - TMPL_ARGS_DEPTH (args)), new_vec, NULL_TREE); TEMPLATE_PARMS_CONSTRAINTS (*new_parms) = TEMPLATE_PARMS_CONSTRAINTS (parms); } --processing_template_decl; return r; } /* Return the result of substituting ARGS into one template parameter given by T. T Must be a TREE_LIST which TREE_VALUE is the template parameter and which TREE_PURPOSE is the default argument of the template parameter. */ static tree tsubst_template_parm (tree t, tree args, tsubst_flags_t complain) { tree default_value, parm_decl; if (args == NULL_TREE || t == NULL_TREE || t == error_mark_node) return t; gcc_assert (TREE_CODE (t) == TREE_LIST); default_value = TREE_PURPOSE (t); parm_decl = TREE_VALUE (t); tree constraint = TEMPLATE_PARM_CONSTRAINTS (t); parm_decl = tsubst (parm_decl, args, complain, NULL_TREE); if (TREE_CODE (parm_decl) == PARM_DECL && invalid_nontype_parm_type_p (TREE_TYPE (parm_decl), complain)) parm_decl = error_mark_node; default_value = tsubst_template_arg (default_value, args, complain, NULL_TREE); constraint = tsubst_constraint (constraint, args, complain, NULL_TREE); tree r = build_tree_list (default_value, parm_decl); TEMPLATE_PARM_CONSTRAINTS (r) = constraint; return r; } /* Substitute the ARGS into the indicated aggregate (or enumeration) type T. If T is not an aggregate or enumeration type, it is handled as if by tsubst. IN_DECL is as for tsubst. If ENTERING_SCOPE is nonzero, T is the context for a template which we are presently tsubst'ing. Return the substituted value. */ static tree tsubst_aggr_type (tree t, tree args, tsubst_flags_t complain, tree in_decl, int entering_scope) { if (t == NULL_TREE) return NULL_TREE; switch (TREE_CODE (t)) { case RECORD_TYPE: if (TYPE_PTRMEMFUNC_P (t)) return tsubst (TYPE_PTRMEMFUNC_FN_TYPE (t), args, complain, in_decl); /* Fall through. */ case ENUMERAL_TYPE: case UNION_TYPE: if (TYPE_TEMPLATE_INFO (t) && uses_template_parms (t)) { tree argvec; tree context; tree r; /* In "sizeof(X<I>)" we need to evaluate "I". */ cp_evaluated ev; /* First, determine the context for the type we are looking up. 
*/ context = TYPE_CONTEXT (t); if (context && TYPE_P (context)) { context = tsubst_aggr_type (context, args, complain, in_decl, /*entering_scope=*/1); /* If context is a nested class inside a class template, it may still need to be instantiated (c++/33959). */ context = complete_type (context); } /* Then, figure out what arguments are appropriate for the type we are trying to find. For example, given: template <class T> struct S; template <class T, class U> void f(T, U) { S<U> su; } and supposing that we are instantiating f<int, double>, then our ARGS will be {int, double}, but, when looking up S we only want {double}. */ argvec = tsubst_template_args (TYPE_TI_ARGS (t), args, complain, in_decl); if (argvec == error_mark_node) r = error_mark_node; else if (!entering_scope && cxx_dialect >= cxx2a && dependent_scope_p (context)) { /* See maybe_dependent_member_ref. */ tree name = TYPE_IDENTIFIER (t); tree fullname = name; if (instantiates_primary_template_p (t)) fullname = build_nt (TEMPLATE_ID_EXPR, name, INNERMOST_TEMPLATE_ARGS (argvec)); return build_typename_type (context, name, fullname, typename_type); } else { r = lookup_template_class (t, argvec, in_decl, context, entering_scope, complain); r = cp_build_qualified_type_real (r, cp_type_quals (t), complain); } return r; } else /* This is not a template type, so there's nothing to do. */ return t; default: return tsubst (t, args, complain, in_decl); } } static GTY((cache)) decl_tree_cache_map *defarg_inst; /* Substitute into the default argument ARG (a default argument for FN), which has the indicated TYPE. */ tree tsubst_default_argument (tree fn, int parmnum, tree type, tree arg, tsubst_flags_t complain) { int errs = errorcount + sorrycount; /* This can happen in invalid code. */ if (TREE_CODE (arg) == DEFERRED_PARSE) return arg; tree parm = FUNCTION_FIRST_USER_PARM (fn); parm = chain_index (parmnum, parm); tree parmtype = TREE_TYPE (parm); if (DECL_BY_REFERENCE (parm)) parmtype = TREE_TYPE (parmtype); if (parmtype == error_mark_node) return error_mark_node; gcc_assert (same_type_ignoring_top_level_qualifiers_p (type, parmtype)); tree *slot; if (defarg_inst && (slot = defarg_inst->get (parm))) return *slot; /* This default argument came from a template. Instantiate the default argument here, not in tsubst. In the case of something like: template <class T> struct S { static T t(); void f(T = t()); }; we must be careful to do name lookup in the scope of S<T>, rather than in the current class. */ push_to_top_level (); push_access_scope (fn); push_deferring_access_checks (dk_no_deferred); start_lambda_scope (parm); /* The default argument expression may cause implicitly defined member functions to be synthesized, which will result in garbage collection. We must treat this situation as if we were within the body of function so as to avoid collecting live data on the stack. */ ++function_depth; arg = tsubst_expr (arg, DECL_TI_ARGS (fn), complain, NULL_TREE, /*integral_constant_expression_p=*/false); --function_depth; finish_lambda_scope (); /* Make sure the default argument is reasonable. 
*/ arg = check_default_argument (type, arg, complain); if (errorcount+sorrycount > errs && (complain & tf_warning_or_error)) inform (input_location, " when instantiating default argument for call to %qD", fn); pop_deferring_access_checks (); pop_access_scope (fn); pop_from_top_level (); if (arg != error_mark_node && !cp_unevaluated_operand) { if (!defarg_inst) defarg_inst = decl_tree_cache_map::create_ggc (37); defarg_inst->put (parm, arg); } return arg; } /* Substitute into all the default arguments for FN. */ static void tsubst_default_arguments (tree fn, tsubst_flags_t complain) { tree arg; tree tmpl_args; tmpl_args = DECL_TI_ARGS (fn); /* If this function is not yet instantiated, we certainly don't need its default arguments. */ if (uses_template_parms (tmpl_args)) return; /* Don't do this again for clones. */ if (DECL_CLONED_FUNCTION_P (fn)) return; int i = 0; for (arg = TYPE_ARG_TYPES (TREE_TYPE (fn)); arg; arg = TREE_CHAIN (arg), ++i) if (TREE_PURPOSE (arg)) TREE_PURPOSE (arg) = tsubst_default_argument (fn, i, TREE_VALUE (arg), TREE_PURPOSE (arg), complain); } /* Hash table mapping a FUNCTION_DECL to its dependent explicit-specifier. */ static GTY((cache)) decl_tree_cache_map *explicit_specifier_map; /* Store a pair to EXPLICIT_SPECIFIER_MAP. */ void store_explicit_specifier (tree v, tree t) { if (!explicit_specifier_map) explicit_specifier_map = decl_tree_cache_map::create_ggc (37); DECL_HAS_DEPENDENT_EXPLICIT_SPEC_P (v) = true; explicit_specifier_map->put (v, t); } /* Lookup an element in EXPLICIT_SPECIFIER_MAP. */ static tree lookup_explicit_specifier (tree v) { return *explicit_specifier_map->get (v); } /* Given T, a FUNCTION_TYPE or METHOD_TYPE, construct and return a corresponding FUNCTION_TYPE or METHOD_TYPE whose return type is RETURN_TYPE, argument types are ARG_TYPES, and exception specification is RAISES, and otherwise is identical to T. */ static tree rebuild_function_or_method_type (tree t, tree return_type, tree arg_types, tree raises, tsubst_flags_t complain) { gcc_assert (FUNC_OR_METHOD_TYPE_P (t)); tree new_type; if (TREE_CODE (t) == FUNCTION_TYPE) { new_type = build_function_type (return_type, arg_types); new_type = apply_memfn_quals (new_type, type_memfn_quals (t)); } else { tree r = TREE_TYPE (TREE_VALUE (arg_types)); /* Don't pick up extra function qualifiers from the basetype. */ r = cp_build_qualified_type_real (r, type_memfn_quals (t), complain); if (! MAYBE_CLASS_TYPE_P (r)) { /* [temp.deduct] Type deduction may fail for any of the following reasons: -- Attempting to create "pointer to member of T" when T is not a class type. */ if (complain & tf_error) error ("creating pointer to member function of non-class type %qT", r); return error_mark_node; } new_type = build_method_type_directly (r, return_type, TREE_CHAIN (arg_types)); } new_type = cp_build_type_attribute_variant (new_type, TYPE_ATTRIBUTES (t)); cp_ref_qualifier rqual = type_memfn_rqual (t); bool late_return_type_p = TYPE_HAS_LATE_RETURN_TYPE (t); return build_cp_fntype_variant (new_type, rqual, raises, late_return_type_p); } /* Check if the function type of DECL, a FUNCTION_DECL, agrees with the type of each of its formal parameters. If there is a disagreement then rebuild DECL's function type according to its formal parameter types, as part of a resolution for Core issues 1001/1322. 
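   Roughly (illustrative only), this concerns parameters declared with
   array or function type, e.g.

     template <class T> void f (T t[]);

   where the declared and the adjusted (decayed) parameter types can come
   apart after substitution; whenever a PARM_DECL's type no longer matches
   the corresponding TYPE_ARG_TYPES entry, the whole FUNCTION_TYPE is
   recreated from the parameters below.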
*/ static void maybe_rebuild_function_decl_type (tree decl) { bool function_type_needs_rebuilding = false; if (tree parm_list = FUNCTION_FIRST_USER_PARM (decl)) { tree parm_type_list = FUNCTION_FIRST_USER_PARMTYPE (decl); while (parm_type_list && parm_type_list != void_list_node) { tree parm_type = TREE_VALUE (parm_type_list); tree formal_parm_type_unqual = strip_top_quals (TREE_TYPE (parm_list)); if (!same_type_p (parm_type, formal_parm_type_unqual)) { function_type_needs_rebuilding = true; break; } parm_list = DECL_CHAIN (parm_list); parm_type_list = TREE_CHAIN (parm_type_list); } } if (!function_type_needs_rebuilding) return; const tree fntype = TREE_TYPE (decl); tree parm_list = DECL_ARGUMENTS (decl); tree old_parm_type_list = TYPE_ARG_TYPES (fntype); tree new_parm_type_list = NULL_TREE; tree *q = &new_parm_type_list; for (int skip = num_artificial_parms_for (decl); skip > 0; skip--) { *q = copy_node (old_parm_type_list); parm_list = DECL_CHAIN (parm_list); old_parm_type_list = TREE_CHAIN (old_parm_type_list); q = &TREE_CHAIN (*q); } while (old_parm_type_list && old_parm_type_list != void_list_node) { *q = copy_node (old_parm_type_list); tree *new_parm_type = &TREE_VALUE (*q); tree formal_parm_type_unqual = strip_top_quals (TREE_TYPE (parm_list)); if (!same_type_p (*new_parm_type, formal_parm_type_unqual)) *new_parm_type = formal_parm_type_unqual; parm_list = DECL_CHAIN (parm_list); old_parm_type_list = TREE_CHAIN (old_parm_type_list); q = &TREE_CHAIN (*q); } if (old_parm_type_list == void_list_node) *q = void_list_node; TREE_TYPE (decl) = rebuild_function_or_method_type (fntype, TREE_TYPE (fntype), new_parm_type_list, TYPE_RAISES_EXCEPTIONS (fntype), tf_none); } /* Subroutine of tsubst_decl for the case when T is a FUNCTION_DECL. */ static tree tsubst_function_decl (tree t, tree args, tsubst_flags_t complain, tree lambda_fntype) { tree gen_tmpl, argvec; hashval_t hash = 0; tree in_decl = t; /* Nobody should be tsubst'ing into non-template functions. */ gcc_assert (DECL_TEMPLATE_INFO (t) != NULL_TREE); if (TREE_CODE (DECL_TI_TEMPLATE (t)) == TEMPLATE_DECL) { /* If T is not dependent, just return it. */ if (!uses_template_parms (DECL_TI_ARGS (t)) && !LAMBDA_FUNCTION_P (t)) return t; /* Calculate the most general template of which R is a specialization. */ gen_tmpl = most_general_template (DECL_TI_TEMPLATE (t)); /* We're substituting a lambda function under tsubst_lambda_expr but not directly from it; find the matching function we're already inside. But don't do this if T is a generic lambda with a single level of template parms, as in that case we're doing a normal instantiation. */ if (LAMBDA_FUNCTION_P (t) && !lambda_fntype && (!generic_lambda_fn_p (t) || TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (gen_tmpl)) > 1)) return enclosing_instantiation_of (t); /* Calculate the complete set of arguments used to specialize R. */ argvec = tsubst_template_args (DECL_TI_ARGS (DECL_TEMPLATE_RESULT (DECL_TI_TEMPLATE (t))), args, complain, in_decl); if (argvec == error_mark_node) return error_mark_node; /* Check to see if we already have this specialization. 
*/ if (!lambda_fntype) { hash = hash_tmpl_and_args (gen_tmpl, argvec); if (tree spec = retrieve_specialization (gen_tmpl, argvec, hash)) return spec; } /* We can see more levels of arguments than parameters if there was a specialization of a member template, like this: template <class T> struct S { template <class U> void f(); } template <> template <class U> void S<int>::f(U); Here, we'll be substituting into the specialization, because that's where we can find the code we actually want to generate, but we'll have enough arguments for the most general template. We also deal with the peculiar case: template <class T> struct S { template <class U> friend void f(); }; template <class U> void f() {} template S<int>; template void f<double>(); Here, the ARGS for the instantiation of will be {int, double}. But, we only need as many ARGS as there are levels of template parameters in CODE_PATTERN. We are careful not to get fooled into reducing the ARGS in situations like: template <class T> struct S { template <class U> void f(U); } template <class T> template <> void S<T>::f(int) {} which we can spot because the pattern will be a specialization in this case. */ int args_depth = TMPL_ARGS_DEPTH (args); int parms_depth = TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (DECL_TI_TEMPLATE (t))); if (args_depth > parms_depth && !DECL_TEMPLATE_SPECIALIZATION (t)) args = get_innermost_template_args (args, parms_depth); } else { /* This special case arises when we have something like this: template <class T> struct S { friend void f<int>(int, double); }; Here, the DECL_TI_TEMPLATE for the friend declaration will be an IDENTIFIER_NODE. We are being called from tsubst_friend_function, and we want only to create a new decl (R) with appropriate types so that we can call determine_specialization. */ gen_tmpl = NULL_TREE; argvec = NULL_TREE; } tree closure = (lambda_fntype ? TYPE_METHOD_BASETYPE (lambda_fntype) : NULL_TREE); tree ctx = closure ? closure : DECL_CONTEXT (t); bool member = ctx && TYPE_P (ctx); if (member && !closure) ctx = tsubst_aggr_type (ctx, args, complain, t, /*entering_scope=*/1); tree type = (lambda_fntype ? lambda_fntype : tsubst (TREE_TYPE (t), args, complain | tf_fndecl_type, in_decl)); if (type == error_mark_node) return error_mark_node; /* If we hit excessive deduction depth, the type is bogus even if it isn't error_mark_node, so don't build a decl. */ if (excessive_deduction_depth) return error_mark_node; /* We do NOT check for matching decls pushed separately at this point, as they may not represent instantiations of this template, and in any case are considered separate under the discrete model. */ tree r = copy_decl (t); DECL_USE_TEMPLATE (r) = 0; TREE_TYPE (r) = type; /* Clear out the mangled name and RTL for the instantiation. */ SET_DECL_ASSEMBLER_NAME (r, NULL_TREE); SET_DECL_RTL (r, NULL); /* Leave DECL_INITIAL set on deleted instantiations. */ if (!DECL_DELETED_FN (r)) DECL_INITIAL (r) = NULL_TREE; DECL_CONTEXT (r) = ctx; /* Handle explicit(dependent-expr). */ if (DECL_HAS_DEPENDENT_EXPLICIT_SPEC_P (t)) { tree spec = lookup_explicit_specifier (t); spec = tsubst_copy_and_build (spec, args, complain, in_decl, /*function_p=*/false, /*i_c_e_p=*/true); spec = build_explicit_specifier (spec, complain); DECL_NONCONVERTING_P (r) = (spec == boolean_true_node); } /* OpenMP UDRs have the only argument a reference to the declared type. We want to diagnose if the declared type is a reference, which is invalid, but as references to references are usually quietly merged, diagnose it here. 
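   For example (illustrative only; the reduction name is made up):

     template <class T>
     void h ()
     {
       #pragma omp declare reduction (mrg : T : omp_out += omp_in)
     }

   instantiated with T bound to int& would give the combiner operands a
   reference type, which is diagnosed here instead of being silently
   collapsed by reference merging.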
*/ if (DECL_OMP_DECLARE_REDUCTION_P (t)) { tree argtype = TREE_TYPE (TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (t)))); argtype = tsubst (argtype, args, complain, in_decl); if (TYPE_REF_P (argtype)) error_at (DECL_SOURCE_LOCATION (t), "reference type %qT in " "%<#pragma omp declare reduction%>", argtype); if (strchr (IDENTIFIER_POINTER (DECL_NAME (t)), '~') == NULL) DECL_NAME (r) = omp_reduction_id (ERROR_MARK, DECL_NAME (t), argtype); } if (member && DECL_CONV_FN_P (r)) /* Type-conversion operator. Reconstruct the name, in case it's the name of one of the template's parameters. */ DECL_NAME (r) = make_conv_op_name (TREE_TYPE (type)); tree parms = DECL_ARGUMENTS (t); if (closure) parms = DECL_CHAIN (parms); parms = tsubst (parms, args, complain, t); for (tree parm = parms; parm; parm = DECL_CHAIN (parm)) DECL_CONTEXT (parm) = r; if (closure) { tree tparm = build_this_parm (r, closure, type_memfn_quals (type)); DECL_CHAIN (tparm) = parms; parms = tparm; } DECL_ARGUMENTS (r) = parms; DECL_RESULT (r) = NULL_TREE; maybe_rebuild_function_decl_type (r); TREE_STATIC (r) = 0; TREE_PUBLIC (r) = TREE_PUBLIC (t); DECL_EXTERNAL (r) = 1; /* If this is an instantiation of a function with internal linkage, we already know what object file linkage will be assigned to the instantiation. */ DECL_INTERFACE_KNOWN (r) = !TREE_PUBLIC (r); DECL_DEFER_OUTPUT (r) = 0; DECL_CHAIN (r) = NULL_TREE; DECL_PENDING_INLINE_INFO (r) = 0; DECL_PENDING_INLINE_P (r) = 0; DECL_SAVED_TREE (r) = NULL_TREE; DECL_STRUCT_FUNCTION (r) = NULL; TREE_USED (r) = 0; /* We'll re-clone as appropriate in instantiate_template. */ DECL_CLONED_FUNCTION (r) = NULL_TREE; /* If we aren't complaining now, return on error before we register the specialization so that we'll complain eventually. */ if ((complain & tf_error) == 0 && IDENTIFIER_ANY_OP_P (DECL_NAME (r)) && !grok_op_properties (r, /*complain=*/false)) return error_mark_node; /* Associate the constraints directly with the instantiation. We don't substitute through the constraints; that's only done when they are checked. */ if (tree ci = get_constraints (t)) /* Unless we're regenerating a lambda, in which case we'll set the lambda's constraints in tsubst_lambda_expr. */ if (!lambda_fntype) set_constraints (r, ci); if (DECL_FRIEND_P (t) && DECL_FRIEND_CONTEXT (t)) SET_DECL_FRIEND_CONTEXT (r, tsubst (DECL_FRIEND_CONTEXT (t), args, complain, in_decl)); /* Set up the DECL_TEMPLATE_INFO for R. There's no need to do this in the special friend case mentioned above where GEN_TMPL is NULL. */ if (gen_tmpl && !closure) { DECL_TEMPLATE_INFO (r) = build_template_info (gen_tmpl, argvec); SET_DECL_IMPLICIT_INSTANTIATION (r); tree new_r = register_specialization (r, gen_tmpl, argvec, false, hash); if (new_r != r) /* We instantiated this while substituting into the type earlier (template/friend54.C). */ return new_r; /* We're not supposed to instantiate default arguments until they are called, for a template. But, for a declaration like: template <class T> void f () { extern void g(int i = T()); } we should do the substitution when the template is instantiated. We handle the member function case in instantiate_class_template since the default arguments might refer to other members of the class. */ if (!member && !PRIMARY_TEMPLATE_P (gen_tmpl) && !uses_template_parms (argvec)) tsubst_default_arguments (r, complain); } else DECL_TEMPLATE_INFO (r) = NULL_TREE; /* Copy the list of befriending classes. 
*/ for (tree *friends = &DECL_BEFRIENDING_CLASSES (r); *friends; friends = &TREE_CHAIN (*friends)) { *friends = copy_node (*friends); TREE_VALUE (*friends) = tsubst (TREE_VALUE (*friends), args, complain, in_decl); } if (DECL_CONSTRUCTOR_P (r) || DECL_DESTRUCTOR_P (r)) { maybe_retrofit_in_chrg (r); if (DECL_CONSTRUCTOR_P (r) && !grok_ctor_properties (ctx, r)) return error_mark_node; /* If this is an instantiation of a member template, clone it. If it isn't, that'll be handled by clone_constructors_and_destructors. */ if (PRIMARY_TEMPLATE_P (gen_tmpl)) clone_function_decl (r, /*update_methods=*/false); } else if ((complain & tf_error) != 0 && IDENTIFIER_ANY_OP_P (DECL_NAME (r)) && !grok_op_properties (r, /*complain=*/true)) return error_mark_node; /* Possibly limit visibility based on template args. */ DECL_VISIBILITY (r) = VISIBILITY_DEFAULT; if (DECL_VISIBILITY_SPECIFIED (t)) { DECL_VISIBILITY_SPECIFIED (r) = 0; DECL_ATTRIBUTES (r) = remove_attribute ("visibility", DECL_ATTRIBUTES (r)); } determine_visibility (r); if (DECL_DEFAULTED_OUTSIDE_CLASS_P (r) && !processing_template_decl) defaulted_late_check (r); apply_late_template_attributes (&r, DECL_ATTRIBUTES (r), 0, args, complain, in_decl); if (flag_openmp) if (tree attr = lookup_attribute ("omp declare variant base", DECL_ATTRIBUTES (r))) omp_declare_variant_finalize (r, attr); return r; } /* Subroutine of tsubst_decl for the case when T is a TEMPLATE_DECL. */ static tree tsubst_template_decl (tree t, tree args, tsubst_flags_t complain, tree lambda_fntype) { /* We can get here when processing a member function template, member class template, or template template parameter. */ tree decl = DECL_TEMPLATE_RESULT (t); tree in_decl = t; tree spec; tree tmpl_args; tree full_args; tree r; hashval_t hash = 0; if (DECL_TEMPLATE_TEMPLATE_PARM_P (t)) { /* Template template parameter is treated here. */ tree new_type = tsubst (TREE_TYPE (t), args, complain, in_decl); if (new_type == error_mark_node) r = error_mark_node; /* If we get a real template back, return it. This can happen in the context of most_specialized_partial_spec. */ else if (TREE_CODE (new_type) == TEMPLATE_DECL) r = new_type; else /* The new TEMPLATE_DECL was built in reduce_template_parm_level. */ r = TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL (new_type); return r; } if (!lambda_fntype) { /* We might already have an instance of this template. The ARGS are for the surrounding class type, so the full args contain the tsubst'd args for the context, plus the innermost args from the template decl. */ tmpl_args = DECL_CLASS_TEMPLATE_P (t) ? CLASSTYPE_TI_ARGS (TREE_TYPE (t)) : DECL_TI_ARGS (DECL_TEMPLATE_RESULT (t)); /* Because this is a template, the arguments will still be dependent, even after substitution. If PROCESSING_TEMPLATE_DECL is not set, the dependency predicates will short-circuit. */ ++processing_template_decl; full_args = tsubst_template_args (tmpl_args, args, complain, in_decl); --processing_template_decl; if (full_args == error_mark_node) return error_mark_node; /* If this is a default template template argument, tsubst might not have changed anything. */ if (full_args == tmpl_args) return t; hash = hash_tmpl_and_args (t, full_args); spec = retrieve_specialization (t, full_args, hash); if (spec != NULL_TREE) { if (TYPE_P (spec)) /* Type partial instantiations are stored as the type by lookup_template_class_1, not here as the template. */ spec = CLASSTYPE_TI_TEMPLATE (spec); return spec; } } /* Make a new template decl. 
It will be similar to the original, but will record the current template arguments. We also create a new function declaration, which is just like the old one, but points to this new template, rather than the old one. */ r = copy_decl (t); gcc_assert (DECL_LANG_SPECIFIC (r) != 0); DECL_CHAIN (r) = NULL_TREE; // Build new template info linking to the original template decl. if (!lambda_fntype) { DECL_TEMPLATE_INFO (r) = build_template_info (t, args); SET_DECL_IMPLICIT_INSTANTIATION (r); } else DECL_TEMPLATE_INFO (r) = NULL_TREE; /* The template parameters for this new template are all the template parameters for the old template, except the outermost level of parameters. */ DECL_TEMPLATE_PARMS (r) = tsubst_template_parms (DECL_TEMPLATE_PARMS (t), args, complain); if (TREE_CODE (decl) == TYPE_DECL && !TYPE_DECL_ALIAS_P (decl)) { tree new_type; ++processing_template_decl; if (CLASS_TYPE_P (TREE_TYPE (t))) new_type = tsubst_aggr_type (TREE_TYPE (t), args, complain, in_decl, /*entering*/1); else new_type = tsubst (TREE_TYPE (t), args, complain, in_decl); --processing_template_decl; if (new_type == error_mark_node) return error_mark_node; TREE_TYPE (r) = new_type; /* For a partial specialization, we need to keep pointing to the primary template. */ if (!DECL_TEMPLATE_SPECIALIZATION (t)) CLASSTYPE_TI_TEMPLATE (new_type) = r; DECL_TEMPLATE_RESULT (r) = TYPE_MAIN_DECL (new_type); DECL_TI_ARGS (r) = CLASSTYPE_TI_ARGS (new_type); DECL_CONTEXT (r) = TYPE_CONTEXT (new_type); } else { tree new_decl; ++processing_template_decl; if (TREE_CODE (decl) == FUNCTION_DECL) new_decl = tsubst_function_decl (decl, args, complain, lambda_fntype); else new_decl = tsubst (decl, args, complain, in_decl); --processing_template_decl; if (new_decl == error_mark_node) return error_mark_node; DECL_TEMPLATE_RESULT (r) = new_decl; TREE_TYPE (r) = TREE_TYPE (new_decl); DECL_CONTEXT (r) = DECL_CONTEXT (new_decl); if (lambda_fntype) { tree args = template_parms_to_args (DECL_TEMPLATE_PARMS (r)); DECL_TEMPLATE_INFO (new_decl) = build_template_info (r, args); } else { DECL_TI_TEMPLATE (new_decl) = r; DECL_TI_ARGS (r) = DECL_TI_ARGS (new_decl); } } DECL_TEMPLATE_INSTANTIATIONS (r) = NULL_TREE; DECL_TEMPLATE_SPECIALIZATIONS (r) = NULL_TREE; if (PRIMARY_TEMPLATE_P (t)) DECL_PRIMARY_TEMPLATE (r) = r; if (TREE_CODE (decl) != TYPE_DECL && !VAR_P (decl) && !lambda_fntype) /* Record this non-type partial instantiation. */ register_specialization (r, t, DECL_TI_ARGS (DECL_TEMPLATE_RESULT (r)), false, hash); return r; } /* True if FN is the op() for a lambda in an uninstantiated template. */ bool lambda_fn_in_template_p (tree fn) { if (!fn || !LAMBDA_FUNCTION_P (fn)) return false; tree closure = DECL_CONTEXT (fn); return CLASSTYPE_TEMPLATE_INFO (closure) != NULL_TREE; } /* True if FN is the substitution (via tsubst_lambda_expr) of a function for which the above is true. */ bool instantiated_lambda_fn_p (tree fn) { if (!fn || !LAMBDA_FUNCTION_P (fn)) return false; tree closure = DECL_CONTEXT (fn); tree lam = CLASSTYPE_LAMBDA_EXPR (closure); return LAMBDA_EXPR_INSTANTIATED (lam); } /* We're instantiating a variable from template function TCTX. Return the corresponding current enclosing scope. This gets complicated because lambda functions in templates are regenerated rather than instantiated, but generic lambda functions are subsequently instantiated. 
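   A rough illustration (example code only):

     template <class T>
     void f ()
     {
       auto l = [] (auto x) { return x; };
       l (0);
     }

   The closure's operator() is first regenerated when f<T> is instantiated
   and only later instantiated for a concrete "auto x"; to find the scope
   that corresponds to the original template context we walk
   current_function_decl outwards, counting instantiated lambda frames,
   until we reach the matching instantiation of f itself.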
*/ static tree enclosing_instantiation_of (tree otctx) { tree tctx = otctx; tree fn = current_function_decl; int lambda_count = 0; for (; tctx && (lambda_fn_in_template_p (tctx) || instantiated_lambda_fn_p (tctx)); tctx = decl_function_context (tctx)) ++lambda_count; for (; fn; fn = decl_function_context (fn)) { tree ofn = fn; int flambda_count = 0; for (; fn && instantiated_lambda_fn_p (fn); fn = decl_function_context (fn)) ++flambda_count; if ((fn && DECL_TEMPLATE_INFO (fn)) ? most_general_template (fn) != most_general_template (tctx) : fn != tctx) continue; if (flambda_count != lambda_count) { gcc_assert (flambda_count > lambda_count); for (; flambda_count > lambda_count; --flambda_count) ofn = decl_function_context (ofn); } gcc_assert (DECL_NAME (ofn) == DECL_NAME (otctx) || DECL_CONV_FN_P (ofn)); return ofn; } gcc_unreachable (); } /* Substitute the ARGS into the T, which is a _DECL. Return the result of the substitution. Issue error and warning messages under control of COMPLAIN. */ static tree tsubst_decl (tree t, tree args, tsubst_flags_t complain) { #define RETURN(EXP) do { r = (EXP); goto out; } while(0) location_t saved_loc; tree r = NULL_TREE; tree in_decl = t; hashval_t hash = 0; /* Set the filename and linenumber to improve error-reporting. */ saved_loc = input_location; input_location = DECL_SOURCE_LOCATION (t); switch (TREE_CODE (t)) { case TEMPLATE_DECL: r = tsubst_template_decl (t, args, complain, /*lambda*/NULL_TREE); break; case FUNCTION_DECL: r = tsubst_function_decl (t, args, complain, /*lambda*/NULL_TREE); break; case PARM_DECL: { tree type = NULL_TREE; int i, len = 1; tree expanded_types = NULL_TREE; tree prev_r = NULL_TREE; tree first_r = NULL_TREE; if (DECL_PACK_P (t)) { /* If there is a local specialization that isn't a parameter pack, it means that we're doing a "simple" substitution from inside tsubst_pack_expansion. Just return the local specialization (which will be a single parm). */ tree spec = retrieve_local_specialization (t); if (spec && TREE_CODE (spec) == PARM_DECL && TREE_CODE (TREE_TYPE (spec)) != TYPE_PACK_EXPANSION) RETURN (spec); /* Expand the TYPE_PACK_EXPANSION that provides the types for the parameters in this function parameter pack. */ expanded_types = tsubst_pack_expansion (TREE_TYPE (t), args, complain, in_decl); if (TREE_CODE (expanded_types) == TREE_VEC) { len = TREE_VEC_LENGTH (expanded_types); /* Zero-length parameter packs are boring. Just substitute into the chain. */ if (len == 0 && !cp_unevaluated_operand) RETURN (tsubst (TREE_CHAIN (t), args, complain, TREE_CHAIN (t))); } else { /* All we did was update the type. Make a note of that. */ type = expanded_types; expanded_types = NULL_TREE; } } /* Loop through all of the parameters we'll build. When T is a function parameter pack, LEN is the number of expanded types in EXPANDED_TYPES; otherwise, LEN is 1. */ r = NULL_TREE; for (i = 0; i < len; ++i) { prev_r = r; r = copy_node (t); if (DECL_TEMPLATE_PARM_P (t)) SET_DECL_TEMPLATE_PARM_P (r); if (expanded_types) /* We're on the Ith parameter of the function parameter pack. */ { /* Get the Ith type. */ type = TREE_VEC_ELT (expanded_types, i); /* Rename the parameter to include the index. */ DECL_NAME (r) = make_ith_pack_parameter_name (DECL_NAME (r), i); } else if (!type) /* We're dealing with a normal parameter. 
*/ type = tsubst (TREE_TYPE (t), args, complain, in_decl); type = type_decays_to (type); TREE_TYPE (r) = type; cp_apply_type_quals_to_decl (cp_type_quals (type), r); if (DECL_INITIAL (r)) { if (TREE_CODE (DECL_INITIAL (r)) != TEMPLATE_PARM_INDEX) DECL_INITIAL (r) = TREE_TYPE (r); else DECL_INITIAL (r) = tsubst (DECL_INITIAL (r), args, complain, in_decl); } DECL_CONTEXT (r) = NULL_TREE; if (!DECL_TEMPLATE_PARM_P (r)) DECL_ARG_TYPE (r) = type_passed_as (type); apply_late_template_attributes (&r, DECL_ATTRIBUTES (r), 0, args, complain, in_decl); /* Keep track of the first new parameter we generate. That's what will be returned to the caller. */ if (!first_r) first_r = r; /* Build a proper chain of parameters when substituting into a function parameter pack. */ if (prev_r) DECL_CHAIN (prev_r) = r; } /* If cp_unevaluated_operand is set, we're just looking for a single dummy parameter, so don't keep going. */ if (DECL_CHAIN (t) && !cp_unevaluated_operand) DECL_CHAIN (r) = tsubst (DECL_CHAIN (t), args, complain, DECL_CHAIN (t)); /* FIRST_R contains the start of the chain we've built. */ r = first_r; } break; case FIELD_DECL: { tree type = NULL_TREE; tree vec = NULL_TREE; tree expanded_types = NULL_TREE; int len = 1; if (PACK_EXPANSION_P (TREE_TYPE (t))) { /* This field is a lambda capture pack. Return a TREE_VEC of the expanded fields to instantiate_class_template_1. */ expanded_types = tsubst_pack_expansion (TREE_TYPE (t), args, complain, in_decl); if (TREE_CODE (expanded_types) == TREE_VEC) { len = TREE_VEC_LENGTH (expanded_types); vec = make_tree_vec (len); } else { /* All we did was update the type. Make a note of that. */ type = expanded_types; expanded_types = NULL_TREE; } } for (int i = 0; i < len; ++i) { r = copy_decl (t); if (expanded_types) { type = TREE_VEC_ELT (expanded_types, i); DECL_NAME (r) = make_ith_pack_parameter_name (DECL_NAME (r), i); } else if (!type) type = tsubst (TREE_TYPE (t), args, complain, in_decl); if (type == error_mark_node) RETURN (error_mark_node); TREE_TYPE (r) = type; cp_apply_type_quals_to_decl (cp_type_quals (type), r); if (DECL_C_BIT_FIELD (r)) /* For bit-fields, DECL_BIT_FIELD_REPRESENTATIVE gives the number of bits. */ DECL_BIT_FIELD_REPRESENTATIVE (r) = tsubst_expr (DECL_BIT_FIELD_REPRESENTATIVE (t), args, complain, in_decl, /*integral_constant_expression_p=*/true); if (DECL_INITIAL (t)) { /* Set up DECL_TEMPLATE_INFO so that we can get at the NSDMI in perform_member_init. Still set DECL_INITIAL so that we know there is one. */ DECL_INITIAL (r) = void_node; gcc_assert (DECL_LANG_SPECIFIC (r) == NULL); retrofit_lang_decl (r); DECL_TEMPLATE_INFO (r) = build_template_info (t, args); } /* We don't have to set DECL_CONTEXT here; it is set by finish_member_declaration. */ DECL_CHAIN (r) = NULL_TREE; apply_late_template_attributes (&r, DECL_ATTRIBUTES (r), 0, args, complain, in_decl); if (vec) TREE_VEC_ELT (vec, i) = r; } if (vec) r = vec; } break; case USING_DECL: /* We reach here only for member using decls. We also need to check uses_template_parms because DECL_DEPENDENT_P is not set for a using-declaration that designates a member of the current instantiation (c++/53549). 
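   Typical shapes handled below (illustrative declarations only):

     template <class B>
     struct D : B { using B::f; };                    // dependent scope

     template <class... Bs>
     struct O : Bs... { using Bs::operator()...; };   // pack-expansion scope

   In the pack-expansion case the substituted scope is a TREE_VEC and a
   separate using-declaration is built for each element.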
*/ if (DECL_DEPENDENT_P (t) || uses_template_parms (USING_DECL_SCOPE (t))) { tree scope = USING_DECL_SCOPE (t); tree name = tsubst_copy (DECL_NAME (t), args, complain, in_decl); if (PACK_EXPANSION_P (scope)) { tree vec = tsubst_pack_expansion (scope, args, complain, in_decl); int len = TREE_VEC_LENGTH (vec); r = make_tree_vec (len); for (int i = 0; i < len; ++i) { tree escope = TREE_VEC_ELT (vec, i); tree elt = do_class_using_decl (escope, name); if (!elt) { r = error_mark_node; break; } else { TREE_PROTECTED (elt) = TREE_PROTECTED (t); TREE_PRIVATE (elt) = TREE_PRIVATE (t); } TREE_VEC_ELT (r, i) = elt; } } else { tree inst_scope = tsubst_copy (USING_DECL_SCOPE (t), args, complain, in_decl); r = do_class_using_decl (inst_scope, name); if (!r) r = error_mark_node; else { TREE_PROTECTED (r) = TREE_PROTECTED (t); TREE_PRIVATE (r) = TREE_PRIVATE (t); } } } else { r = copy_node (t); DECL_CHAIN (r) = NULL_TREE; } break; case TYPE_DECL: case VAR_DECL: { tree argvec = NULL_TREE; tree gen_tmpl = NULL_TREE; tree spec; tree tmpl = NULL_TREE; tree ctx; tree type = NULL_TREE; bool local_p; if (TREE_TYPE (t) == error_mark_node) RETURN (error_mark_node); if (TREE_CODE (t) == TYPE_DECL && t == TYPE_MAIN_DECL (TREE_TYPE (t))) { /* If this is the canonical decl, we don't have to mess with instantiations, and often we can't (for typename, template type parms and such). Note that TYPE_NAME is not correct for the above test if we've copied the type for a typedef. */ type = tsubst (TREE_TYPE (t), args, complain, in_decl); if (type == error_mark_node) RETURN (error_mark_node); r = TYPE_NAME (type); break; } /* Check to see if we already have the specialization we need. */ spec = NULL_TREE; if (DECL_CLASS_SCOPE_P (t) || DECL_NAMESPACE_SCOPE_P (t)) { /* T is a static data member or namespace-scope entity. We have to substitute into namespace-scope variables (not just variable templates) because of cases like: template <class T> void f() { extern T t; } where the entity referenced is not known until instantiation time. */ local_p = false; ctx = DECL_CONTEXT (t); if (DECL_CLASS_SCOPE_P (t)) { ctx = tsubst_aggr_type (ctx, args, complain, in_decl, /*entering_scope=*/1); /* If CTX is unchanged, then T is in fact the specialization we want. That situation occurs when referencing a static data member within in its own class. We can use pointer equality, rather than same_type_p, because DECL_CONTEXT is always canonical... */ if (ctx == DECL_CONTEXT (t) /* ... unless T is a member template; in which case our caller can be willing to create a specialization of that template represented by T. */ && !(DECL_TI_TEMPLATE (t) && DECL_MEMBER_TEMPLATE_P (DECL_TI_TEMPLATE (t)))) spec = t; } if (!spec) { tmpl = DECL_TI_TEMPLATE (t); gen_tmpl = most_general_template (tmpl); argvec = tsubst (DECL_TI_ARGS (t), args, complain, in_decl); if (argvec != error_mark_node) argvec = (coerce_innermost_template_parms (DECL_TEMPLATE_PARMS (gen_tmpl), argvec, t, complain, /*all*/true, /*defarg*/true)); if (argvec == error_mark_node) RETURN (error_mark_node); hash = hash_tmpl_and_args (gen_tmpl, argvec); spec = retrieve_specialization (gen_tmpl, argvec, hash); } } else { /* A local variable. */ local_p = true; /* Subsequent calls to pushdecl will fill this in. */ ctx = NULL_TREE; /* Unless this is a reference to a static variable from an enclosing function, in which case we need to fill it in now. 
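   For instance (a rough sketch):

     template <class T>
     int f ()
     {
       static int cache;
       struct L { int get () { return cache; } };
       return L ().get ();
     }

   When L::get is instantiated, the VAR_DECL for "cache" is substituted
   while current_function_decl is L::get, so its context must be set to
   the enclosing instantiation of f found here.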
*/ if (TREE_STATIC (t)) { tree fn = enclosing_instantiation_of (DECL_CONTEXT (t)); if (fn != current_function_decl) ctx = fn; } spec = retrieve_local_specialization (t); } /* If we already have the specialization we need, there is nothing more to do. */ if (spec) { r = spec; break; } /* Create a new node for the specialization we need. */ if (type == NULL_TREE) { if (is_typedef_decl (t)) type = DECL_ORIGINAL_TYPE (t); else type = TREE_TYPE (t); if (VAR_P (t) && VAR_HAD_UNKNOWN_BOUND (t) && type != error_mark_node) type = strip_array_domain (type); tree sub_args = args; if (tree auto_node = type_uses_auto (type)) { /* Mask off any template args past the variable's context so we don't replace the auto with an unrelated argument. */ int nouter = TEMPLATE_TYPE_LEVEL (auto_node) - 1; int extra = TMPL_ARGS_DEPTH (args) - nouter; if (extra > 0) /* This should never happen with the new lambda instantiation model, but keep the handling just in case. */ gcc_assert (!CHECKING_P), sub_args = strip_innermost_template_args (args, extra); } type = tsubst (type, sub_args, complain, in_decl); /* Substituting the type might have recursively instantiated this same alias (c++/86171). */ if (gen_tmpl && DECL_ALIAS_TEMPLATE_P (gen_tmpl) && (spec = retrieve_specialization (gen_tmpl, argvec, hash))) { r = spec; break; } } r = copy_decl (t); if (VAR_P (r)) { DECL_INITIALIZED_P (r) = 0; DECL_TEMPLATE_INSTANTIATED (r) = 0; if (type == error_mark_node) RETURN (error_mark_node); if (TREE_CODE (type) == FUNCTION_TYPE) { /* It may seem that this case cannot occur, since: typedef void f(); void g() { f x; } declares a function, not a variable. However: typedef void f(); template <typename T> void g() { T t; } template void g<f>(); is an attempt to declare a variable with function type. */ error ("variable %qD has function type", /* R is not yet sufficiently initialized, so we just use its name. */ DECL_NAME (r)); RETURN (error_mark_node); } type = complete_type (type); /* Wait until cp_finish_decl to set this again, to handle circular dependency (template/instantiate6.C). */ DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (r) = 0; type = check_var_type (DECL_NAME (r), type, DECL_SOURCE_LOCATION (r)); if (DECL_HAS_VALUE_EXPR_P (t)) { tree ve = DECL_VALUE_EXPR (t); /* If the DECL_VALUE_EXPR is converted to the declared type, preserve the identity so that gimplify_type_sizes works. */ bool nop = (TREE_CODE (ve) == NOP_EXPR); if (nop) ve = TREE_OPERAND (ve, 0); ve = tsubst_expr (ve, args, complain, in_decl, /*constant_expression_p=*/false); if (REFERENCE_REF_P (ve)) { gcc_assert (TYPE_REF_P (type)); ve = TREE_OPERAND (ve, 0); } if (nop) ve = build_nop (type, ve); else gcc_checking_assert (TREE_TYPE (ve) == type); SET_DECL_VALUE_EXPR (r, ve); } if (CP_DECL_THREAD_LOCAL_P (r) && !processing_template_decl) set_decl_tls_model (r, decl_default_tls_model (r)); } else if (DECL_SELF_REFERENCE_P (t)) SET_DECL_SELF_REFERENCE_P (r); TREE_TYPE (r) = type; cp_apply_type_quals_to_decl (cp_type_quals (type), r); DECL_CONTEXT (r) = ctx; /* Clear out the mangled name and RTL for the instantiation. */ SET_DECL_ASSEMBLER_NAME (r, NULL_TREE); if (CODE_CONTAINS_STRUCT (TREE_CODE (t), TS_DECL_WRTL)) SET_DECL_RTL (r, NULL); /* The initializer must not be expanded until it is required; see [temp.inst]. */ DECL_INITIAL (r) = NULL_TREE; DECL_SIZE (r) = DECL_SIZE_UNIT (r) = 0; if (VAR_P (r)) { if (DECL_LANG_SPECIFIC (r)) SET_DECL_DEPENDENT_INIT_P (r, false); SET_DECL_MODE (r, VOIDmode); /* Possibly limit visibility based on template args. 
*/ DECL_VISIBILITY (r) = VISIBILITY_DEFAULT; if (DECL_VISIBILITY_SPECIFIED (t)) { DECL_VISIBILITY_SPECIFIED (r) = 0; DECL_ATTRIBUTES (r) = remove_attribute ("visibility", DECL_ATTRIBUTES (r)); } determine_visibility (r); } if (!local_p) { /* A static data member declaration is always marked external when it is declared in-class, even if an initializer is present. We mimic the non-template processing here. */ DECL_EXTERNAL (r) = 1; if (DECL_NAMESPACE_SCOPE_P (t)) DECL_NOT_REALLY_EXTERN (r) = 1; DECL_TEMPLATE_INFO (r) = build_template_info (tmpl, argvec); SET_DECL_IMPLICIT_INSTANTIATION (r); /* Remember whether we require constant initialization of a non-constant template variable. */ TINFO_VAR_DECLARED_CONSTINIT (DECL_TEMPLATE_INFO (r)) = TINFO_VAR_DECLARED_CONSTINIT (DECL_TEMPLATE_INFO (t)); if (!error_operand_p (r) || (complain & tf_error)) register_specialization (r, gen_tmpl, argvec, false, hash); } else { if (DECL_LANG_SPECIFIC (r)) DECL_TEMPLATE_INFO (r) = NULL_TREE; if (!cp_unevaluated_operand) register_local_specialization (r, t); } DECL_CHAIN (r) = NULL_TREE; apply_late_template_attributes (&r, DECL_ATTRIBUTES (r), /*flags=*/0, args, complain, in_decl); /* Preserve a typedef that names a type. */ if (is_typedef_decl (r) && type != error_mark_node) { DECL_ORIGINAL_TYPE (r) = NULL_TREE; set_underlying_type (r); if (TYPE_DECL_ALIAS_P (r)) /* An alias template specialization can be dependent even if its underlying type is not. */ TYPE_DEPENDENT_P_VALID (TREE_TYPE (r)) = false; } layout_decl (r, 0); } break; default: gcc_unreachable (); } #undef RETURN out: /* Restore the file and line information. */ input_location = saved_loc; return r; } /* Substitute into the complete parameter type list PARMS. */ tree tsubst_function_parms (tree parms, tree args, tsubst_flags_t complain, tree in_decl) { return tsubst_arg_types (parms, args, NULL_TREE, complain, in_decl); } /* Substitute into the ARG_TYPES of a function type. If END is a TREE_CHAIN, leave it and any following types un-substituted. */ static tree tsubst_arg_types (tree arg_types, tree args, tree end, tsubst_flags_t complain, tree in_decl) { tree remaining_arg_types; tree type = NULL_TREE; int i = 1; tree expanded_args = NULL_TREE; tree default_arg; if (!arg_types || arg_types == void_list_node || arg_types == end) return arg_types; remaining_arg_types = tsubst_arg_types (TREE_CHAIN (arg_types), args, end, complain, in_decl); if (remaining_arg_types == error_mark_node) return error_mark_node; if (PACK_EXPANSION_P (TREE_VALUE (arg_types))) { /* For a pack expansion, perform substitution on the entire expression. Later on, we'll handle the arguments one-by-one. */ expanded_args = tsubst_pack_expansion (TREE_VALUE (arg_types), args, complain, in_decl); if (TREE_CODE (expanded_args) == TREE_VEC) /* So that we'll spin through the parameters, one by one. */ i = TREE_VEC_LENGTH (expanded_args); else { /* We only partially substituted into the parameter pack. Our type is TYPE_PACK_EXPANSION. */ type = expanded_args; expanded_args = NULL_TREE; } } while (i > 0) { --i; if (expanded_args) type = TREE_VEC_ELT (expanded_args, i); else if (!type) type = tsubst (TREE_VALUE (arg_types), args, complain, in_decl); if (type == error_mark_node) return error_mark_node; if (VOID_TYPE_P (type)) { if (complain & tf_error) { error ("invalid parameter type %qT", type); if (in_decl) error ("in declaration %q+D", in_decl); } return error_mark_node; } /* DR 657. 
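   (An abstract class used directly as a parameter type is likewise a
   substitution failure rather than a hard error.  Illustrative sketch,
   hypothetical code:

     struct Abstract { virtual void f () = 0; };
     template <class T> void g (T);

   substituting T = Abstract is rejected by the check that follows.)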
*/ if (abstract_virtuals_error_sfinae (ACU_PARM, type, complain)) return error_mark_node; /* Do array-to-pointer, function-to-pointer conversion, and ignore top-level qualifiers as required. */ type = cv_unqualified (type_decays_to (type)); /* We do not substitute into default arguments here. The standard mandates that they be instantiated only when needed, which is done in build_over_call. */ default_arg = TREE_PURPOSE (arg_types); /* Except that we do substitute default arguments under tsubst_lambda_expr, since the new op() won't have any associated template arguments for us to refer to later. */ if (lambda_fn_in_template_p (in_decl)) default_arg = tsubst_copy_and_build (default_arg, args, complain, in_decl, false/*fn*/, false/*constexpr*/); if (default_arg && TREE_CODE (default_arg) == DEFERRED_PARSE) { /* We've instantiated a template before its default arguments have been parsed. This can happen for a nested template class, and is not an error unless we require the default argument in a call of this function. */ remaining_arg_types = tree_cons (default_arg, type, remaining_arg_types); vec_safe_push (DEFPARSE_INSTANTIATIONS (default_arg), remaining_arg_types); } else remaining_arg_types = hash_tree_cons (default_arg, type, remaining_arg_types); } return remaining_arg_types; } /* Substitute into a FUNCTION_TYPE or METHOD_TYPE. This routine does *not* handle the exception-specification for FNTYPE, because the initial substitution of explicitly provided template parameters during argument deduction forbids substitution into the exception-specification: [temp.deduct] All references in the function type of the function template to the corresponding template parameters are replaced by the specified tem- plate argument values. If a substitution in a template parameter or in the function type of the function template results in an invalid type, type deduction fails. [Note: The equivalent substitution in exception specifications is done only when the function is instanti- ated, at which point a program is ill-formed if the substitution results in an invalid type.] */ static tree tsubst_function_type (tree t, tree args, tsubst_flags_t complain, tree in_decl) { tree return_type; tree arg_types = NULL_TREE; /* The TYPE_CONTEXT is not used for function/method types. */ gcc_assert (TYPE_CONTEXT (t) == NULL_TREE); /* DR 1227: Mixing immediate and non-immediate contexts in deduction failure. */ bool late_return_type_p = TYPE_HAS_LATE_RETURN_TYPE (t); if (late_return_type_p) { /* Substitute the argument types. */ arg_types = tsubst_arg_types (TYPE_ARG_TYPES (t), args, NULL_TREE, complain, in_decl); if (arg_types == error_mark_node) return error_mark_node; tree save_ccp = current_class_ptr; tree save_ccr = current_class_ref; tree this_type = (TREE_CODE (t) == METHOD_TYPE ? TREE_TYPE (TREE_VALUE (arg_types)) : NULL_TREE); bool do_inject = this_type && CLASS_TYPE_P (this_type); if (do_inject) { /* DR 1207: 'this' is in scope in the trailing return type. */ inject_this_parameter (this_type, cp_type_quals (this_type)); } /* Substitute the return type. */ return_type = tsubst (TREE_TYPE (t), args, complain, in_decl); if (do_inject) { current_class_ptr = save_ccp; current_class_ref = save_ccr; } } else /* Substitute the return type. */ return_type = tsubst (TREE_TYPE (t), args, complain, in_decl); if (return_type == error_mark_node) return error_mark_node; /* DR 486 clarifies that creation of a function type with an invalid return type is a deduction failure. 
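   As an illustrative sketch (hypothetical code), with

     typedef int A[5];
     template <class T> T h ();

   substituting T = A would create the invalid type "function returning
   an array", which the checks just below reject, either quietly as a
   deduction failure or with a diagnostic when errors are requested.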
*/ if (TREE_CODE (return_type) == ARRAY_TYPE || TREE_CODE (return_type) == FUNCTION_TYPE) { if (complain & tf_error) { if (TREE_CODE (return_type) == ARRAY_TYPE) error ("function returning an array"); else error ("function returning a function"); } return error_mark_node; } /* And DR 657. */ if (abstract_virtuals_error_sfinae (ACU_RETURN, return_type, complain)) return error_mark_node; if (!late_return_type_p) { /* Substitute the argument types. */ arg_types = tsubst_arg_types (TYPE_ARG_TYPES (t), args, NULL_TREE, complain, in_decl); if (arg_types == error_mark_node) return error_mark_node; } /* Construct a new type node and return it. */ return rebuild_function_or_method_type (t, return_type, arg_types, /*raises=*/NULL_TREE, complain); } /* FNTYPE is a FUNCTION_TYPE or METHOD_TYPE. Substitute the template ARGS into that specification, and return the substituted specification. If there is no specification, return NULL_TREE. */ static tree tsubst_exception_specification (tree fntype, tree args, tsubst_flags_t complain, tree in_decl, bool defer_ok) { tree specs; tree new_specs; specs = TYPE_RAISES_EXCEPTIONS (fntype); new_specs = NULL_TREE; if (specs && TREE_PURPOSE (specs)) { /* A noexcept-specifier. */ tree expr = TREE_PURPOSE (specs); if (TREE_CODE (expr) == INTEGER_CST) new_specs = expr; else if (defer_ok) { /* Defer instantiation of noexcept-specifiers to avoid excessive instantiations (c++/49107). */ new_specs = make_node (DEFERRED_NOEXCEPT); if (DEFERRED_NOEXCEPT_SPEC_P (specs)) { /* We already partially instantiated this member template, so combine the new args with the old. */ DEFERRED_NOEXCEPT_PATTERN (new_specs) = DEFERRED_NOEXCEPT_PATTERN (expr); DEFERRED_NOEXCEPT_ARGS (new_specs) = add_to_template_args (DEFERRED_NOEXCEPT_ARGS (expr), args); } else { DEFERRED_NOEXCEPT_PATTERN (new_specs) = expr; DEFERRED_NOEXCEPT_ARGS (new_specs) = args; } } else { if (DEFERRED_NOEXCEPT_SPEC_P (specs)) { args = add_to_template_args (DEFERRED_NOEXCEPT_ARGS (expr), args); expr = DEFERRED_NOEXCEPT_PATTERN (expr); } new_specs = tsubst_copy_and_build (expr, args, complain, in_decl, /*function_p=*/false, /*integral_constant_expression_p=*/true); } new_specs = build_noexcept_spec (new_specs, complain); } else if (specs) { if (! TREE_VALUE (specs)) new_specs = specs; else while (specs) { tree spec; int i, len = 1; tree expanded_specs = NULL_TREE; if (PACK_EXPANSION_P (TREE_VALUE (specs))) { /* Expand the pack expansion type. */ expanded_specs = tsubst_pack_expansion (TREE_VALUE (specs), args, complain, in_decl); if (expanded_specs == error_mark_node) return error_mark_node; else if (TREE_CODE (expanded_specs) == TREE_VEC) len = TREE_VEC_LENGTH (expanded_specs); else { /* We're substituting into a member template, so we got a TYPE_PACK_EXPANSION back. Add that expansion and move on. */ gcc_assert (TREE_CODE (expanded_specs) == TYPE_PACK_EXPANSION); new_specs = add_exception_specifier (new_specs, expanded_specs, complain); specs = TREE_CHAIN (specs); continue; } } for (i = 0; i < len; ++i) { if (expanded_specs) spec = TREE_VEC_ELT (expanded_specs, i); else spec = tsubst (TREE_VALUE (specs), args, complain, in_decl); if (spec == error_mark_node) return spec; new_specs = add_exception_specifier (new_specs, spec, complain); } specs = TREE_CHAIN (specs); } } return new_specs; } /* Take the tree structure T and replace template parameters used therein with the argument vector ARGS. IN_DECL is an associated decl for diagnostics. If an error occurs, returns ERROR_MARK_NODE. 
Issue error and warning messages under control of COMPLAIN. Note that we must be relatively non-tolerant of extensions here, in order to preserve conformance; if we allow substitutions that should not be allowed, we may allow argument deductions that should not succeed, and therefore report ambiguous overload situations where there are none. In theory, we could allow the substitution, but indicate that it should have failed, and allow our caller to make sure that the right thing happens, but we don't try to do this yet. This function is used for dealing with types, decls and the like; for expressions, use tsubst_expr or tsubst_copy. */ tree tsubst (tree t, tree args, tsubst_flags_t complain, tree in_decl) { enum tree_code code; tree type, r = NULL_TREE; if (t == NULL_TREE || t == error_mark_node || t == integer_type_node || t == void_type_node || t == char_type_node || t == unknown_type_node || TREE_CODE (t) == NAMESPACE_DECL || TREE_CODE (t) == TRANSLATION_UNIT_DECL) return t; if (DECL_P (t)) return tsubst_decl (t, args, complain); if (args == NULL_TREE) return t; code = TREE_CODE (t); if (code == IDENTIFIER_NODE) type = IDENTIFIER_TYPE_VALUE (t); else type = TREE_TYPE (t); gcc_assert (type != unknown_type_node); /* Reuse typedefs. We need to do this to handle dependent attributes, such as attribute aligned. */ if (TYPE_P (t) && typedef_variant_p (t)) { tree decl = TYPE_NAME (t); if (alias_template_specialization_p (t, nt_opaque)) { /* DECL represents an alias template and we want to instantiate it. */ tree tmpl = most_general_template (DECL_TI_TEMPLATE (decl)); tree gen_args = tsubst (DECL_TI_ARGS (decl), args, complain, in_decl); r = instantiate_alias_template (tmpl, gen_args, complain); } else if (DECL_CLASS_SCOPE_P (decl) && CLASSTYPE_TEMPLATE_INFO (DECL_CONTEXT (decl)) && uses_template_parms (DECL_CONTEXT (decl))) { tree tmpl = most_general_template (DECL_TI_TEMPLATE (decl)); tree gen_args = tsubst (DECL_TI_ARGS (decl), args, complain, in_decl); r = retrieve_specialization (tmpl, gen_args, 0); } else if (DECL_FUNCTION_SCOPE_P (decl) && DECL_TEMPLATE_INFO (DECL_CONTEXT (decl)) && uses_template_parms (DECL_TI_ARGS (DECL_CONTEXT (decl)))) r = retrieve_local_specialization (decl); else /* The typedef is from a non-template context. */ return t; if (r) { r = TREE_TYPE (r); r = cp_build_qualified_type_real (r, cp_type_quals (t) | cp_type_quals (r), complain | tf_ignore_bad_quals); return r; } else { /* We don't have an instantiation yet, so drop the typedef. 
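   As an illustrative sketch (hypothetical code), for

     template <class T> struct S { typedef T *ptr; };

   a use of the typedef can be substituted at a point where the
   corresponding specialization (say S<int>) has not been created yet;
   in that case we fall back to substituting the typedef's original
   type, here 'T *', directly.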
*/ int quals = cp_type_quals (t); t = DECL_ORIGINAL_TYPE (decl); t = cp_build_qualified_type_real (t, quals, complain | tf_ignore_bad_quals); } } bool fndecl_type = (complain & tf_fndecl_type); complain &= ~tf_fndecl_type; if (type && code != TYPENAME_TYPE && code != TEMPLATE_TYPE_PARM && code != TEMPLATE_PARM_INDEX && code != IDENTIFIER_NODE && code != FUNCTION_TYPE && code != METHOD_TYPE) type = tsubst (type, args, complain, in_decl); if (type == error_mark_node) return error_mark_node; switch (code) { case RECORD_TYPE: case UNION_TYPE: case ENUMERAL_TYPE: return tsubst_aggr_type (t, args, complain, in_decl, /*entering_scope=*/0); case ERROR_MARK: case IDENTIFIER_NODE: case VOID_TYPE: case REAL_TYPE: case COMPLEX_TYPE: case VECTOR_TYPE: case BOOLEAN_TYPE: case NULLPTR_TYPE: case LANG_TYPE: return t; case INTEGER_TYPE: if (t == integer_type_node) return t; if (TREE_CODE (TYPE_MIN_VALUE (t)) == INTEGER_CST && TREE_CODE (TYPE_MAX_VALUE (t)) == INTEGER_CST) return t; { tree max, omax = TREE_OPERAND (TYPE_MAX_VALUE (t), 0); max = tsubst_expr (omax, args, complain, in_decl, /*integral_constant_expression_p=*/false); /* Fix up type of the magic NOP_EXPR with TREE_SIDE_EFFECTS if needed. */ if (TREE_CODE (max) == NOP_EXPR && TREE_SIDE_EFFECTS (omax) && !TREE_TYPE (max)) TREE_TYPE (max) = TREE_TYPE (TREE_OPERAND (max, 0)); /* If we're in a partial instantiation, preserve the magic NOP_EXPR with TREE_SIDE_EFFECTS that indicates this is not an integral constant expression. */ if (processing_template_decl && TREE_SIDE_EFFECTS (omax) && TREE_CODE (omax) == NOP_EXPR) { gcc_assert (TREE_CODE (max) == NOP_EXPR); TREE_SIDE_EFFECTS (max) = 1; } return compute_array_index_type (NULL_TREE, max, complain); } case TEMPLATE_TYPE_PARM: case TEMPLATE_TEMPLATE_PARM: case BOUND_TEMPLATE_TEMPLATE_PARM: case TEMPLATE_PARM_INDEX: { int idx; int level; int levels; tree arg = NULL_TREE; r = NULL_TREE; gcc_assert (TREE_VEC_LENGTH (args) > 0); template_parm_level_and_index (t, &level, &idx); levels = TMPL_ARGS_DEPTH (args); if (level <= levels && TREE_VEC_LENGTH (TMPL_ARGS_LEVEL (args, level)) > 0) { arg = TMPL_ARG (args, level, idx); /* See through ARGUMENT_PACK_SELECT arguments. */ if (arg && TREE_CODE (arg) == ARGUMENT_PACK_SELECT) arg = argument_pack_select_arg (arg); } if (arg == error_mark_node) return error_mark_node; else if (arg != NULL_TREE) { if (ARGUMENT_PACK_P (arg)) /* If ARG is an argument pack, we don't actually want to perform a substitution here, because substitutions for argument packs are only done element-by-element. We can get to this point when substituting the type of a non-type template parameter pack, when that type actually contains template parameter packs from an outer template, e.g., template<typename... Types> struct A { template<Types... Values> struct B { }; }; */ return t; if (code == TEMPLATE_TYPE_PARM) { int quals; /* When building concept checks for the purpose of deducing placeholders, we can end up with wildcards where types are expected. Adjust this to the deduced value. */ if (TREE_CODE (arg) == WILDCARD_DECL) arg = TREE_TYPE (TREE_TYPE (arg)); gcc_assert (TYPE_P (arg)); quals = cp_type_quals (arg) | cp_type_quals (t); return cp_build_qualified_type_real (arg, quals, complain | tf_ignore_bad_quals); } else if (code == BOUND_TEMPLATE_TEMPLATE_PARM) { /* We are processing a type constructed from a template template parameter. 
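   As an illustrative sketch (hypothetical code):

     template <template <class> class TT> struct S { TT<int> m; };
     template <class U> struct Vec { };
     S<Vec> s;

   Substituting TT<int> with TT = Vec must look up and build Vec<int>,
   which is what the code below does.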
*/ tree argvec = tsubst (TYPE_TI_ARGS (t), args, complain, in_decl); if (argvec == error_mark_node) return error_mark_node; gcc_assert (TREE_CODE (arg) == TEMPLATE_TEMPLATE_PARM || TREE_CODE (arg) == TEMPLATE_DECL || TREE_CODE (arg) == UNBOUND_CLASS_TEMPLATE); if (TREE_CODE (arg) == UNBOUND_CLASS_TEMPLATE) /* Consider this code: template <template <class> class Template> struct Internal { template <class Arg> using Bind = Template<Arg>; }; template <template <class> class Template, class Arg> using Instantiate = Template<Arg>; //#0 template <template <class> class Template, class Argument> using Bind = Instantiate<Internal<Template>::template Bind, Argument>; //#1 When #1 is parsed, the BOUND_TEMPLATE_TEMPLATE_PARM representing the parameter `Template' in #0 matches the UNBOUND_CLASS_TEMPLATE representing the argument `Internal<Template>::template Bind'; We then want to assemble the type `Bind<Argument>' that can't be fully created right now, because `Internal<Template>' not being complete, the Bind template cannot be looked up in that context. So we need to "store" `Bind<Argument>' for later when the context of Bind becomes complete. Let's store that in a TYPENAME_TYPE. */ return make_typename_type (TYPE_CONTEXT (arg), build_nt (TEMPLATE_ID_EXPR, TYPE_IDENTIFIER (arg), argvec), typename_type, complain); /* We can get a TEMPLATE_TEMPLATE_PARM here when we are resolving nested-types in the signature of a member function templates. Otherwise ARG is a TEMPLATE_DECL and is the real template to be instantiated. */ if (TREE_CODE (arg) == TEMPLATE_TEMPLATE_PARM) arg = TYPE_NAME (arg); r = lookup_template_class (arg, argvec, in_decl, DECL_CONTEXT (arg), /*entering_scope=*/0, complain); return cp_build_qualified_type_real (r, cp_type_quals (t) | cp_type_quals (r), complain); } else if (code == TEMPLATE_TEMPLATE_PARM) return arg; else /* TEMPLATE_PARM_INDEX. */ return convert_from_reference (unshare_expr (arg)); } if (level == 1) /* This can happen during the attempted tsubst'ing in unify. This means that we don't yet have any information about the template parameter in question. */ return t; /* Early in template argument deduction substitution, we don't want to reduce the level of 'auto', or it will be confused with a normal template parm in subsequent deduction. Similarly, don't reduce the level of template parameters to avoid mismatches when deducing their types. */ if (complain & tf_partial) return t; /* If we get here, we must have been looking at a parm for a more deeply nested template. Make a new version of this template parameter, but with a lower level. */ switch (code) { case TEMPLATE_TYPE_PARM: case TEMPLATE_TEMPLATE_PARM: case BOUND_TEMPLATE_TEMPLATE_PARM: if (cp_type_quals (t)) { r = tsubst (TYPE_MAIN_VARIANT (t), args, complain, in_decl); r = cp_build_qualified_type_real (r, cp_type_quals (t), complain | (code == TEMPLATE_TYPE_PARM ? tf_ignore_bad_quals : 0)); } else if (TREE_CODE (t) == TEMPLATE_TYPE_PARM && PLACEHOLDER_TYPE_CONSTRAINTS (t) && (r = (TEMPLATE_PARM_DESCENDANTS (TEMPLATE_TYPE_PARM_INDEX (t)))) && (r = TREE_TYPE (r)) && !PLACEHOLDER_TYPE_CONSTRAINTS (r)) /* Break infinite recursion when substituting the constraints of a constrained placeholder. */; else if (TREE_CODE (t) == TEMPLATE_TYPE_PARM && !PLACEHOLDER_TYPE_CONSTRAINTS (t) && !CLASS_PLACEHOLDER_TEMPLATE (t) && (arg = TEMPLATE_TYPE_PARM_INDEX (t), r = TEMPLATE_PARM_DESCENDANTS (arg)) && (TEMPLATE_PARM_LEVEL (r) == TEMPLATE_PARM_LEVEL (arg) - levels)) /* Cache the simple case of lowering a type parameter. 
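   As an illustrative sketch (hypothetical code), when the class-level
   arguments of

     template <class T> struct A
     {
       template <class U> void f (T, U);
     };

   are substituted to form A<int>::f, 'U' survives but must be rebuilt
   at level 1 instead of level 2; the common case of that lowering is
   what gets cached here.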
*/ r = TREE_TYPE (r); else { r = copy_type (t); TEMPLATE_TYPE_PARM_INDEX (r) = reduce_template_parm_level (TEMPLATE_TYPE_PARM_INDEX (t), r, levels, args, complain); TYPE_STUB_DECL (r) = TYPE_NAME (r) = TEMPLATE_TYPE_DECL (r); TYPE_MAIN_VARIANT (r) = r; TYPE_POINTER_TO (r) = NULL_TREE; TYPE_REFERENCE_TO (r) = NULL_TREE; if (TREE_CODE (t) == TEMPLATE_TYPE_PARM) { /* Propagate constraints on placeholders since they are only instantiated during satisfaction. */ if (tree constr = PLACEHOLDER_TYPE_CONSTRAINTS (t)) PLACEHOLDER_TYPE_CONSTRAINTS (r) = constr; else if (tree pl = CLASS_PLACEHOLDER_TEMPLATE (t)) { pl = tsubst_copy (pl, args, complain, in_decl); CLASS_PLACEHOLDER_TEMPLATE (r) = pl; } } if (TREE_CODE (r) == TEMPLATE_TEMPLATE_PARM) /* We have reduced the level of the template template parameter, but not the levels of its template parameters, so canonical_type_parameter will not be able to find the canonical template template parameter for this level. Thus, we require structural equality checking to compare TEMPLATE_TEMPLATE_PARMs. */ SET_TYPE_STRUCTURAL_EQUALITY (r); else if (TYPE_STRUCTURAL_EQUALITY_P (t)) SET_TYPE_STRUCTURAL_EQUALITY (r); else TYPE_CANONICAL (r) = canonical_type_parameter (r); if (code == BOUND_TEMPLATE_TEMPLATE_PARM) { tree tinfo = TYPE_TEMPLATE_INFO (t); /* We might need to substitute into the types of non-type template parameters. */ tree tmpl = tsubst (TI_TEMPLATE (tinfo), args, complain, in_decl); if (tmpl == error_mark_node) return error_mark_node; tree argvec = tsubst (TI_ARGS (tinfo), args, complain, in_decl); if (argvec == error_mark_node) return error_mark_node; TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO (r) = build_template_info (tmpl, argvec); } } break; case TEMPLATE_PARM_INDEX: /* OK, now substitute the type of the non-type parameter. We couldn't do it earlier because it might be an auto parameter, and we wouldn't need to if we had an argument. */ type = tsubst (type, args, complain, in_decl); if (type == error_mark_node) return error_mark_node; r = reduce_template_parm_level (t, type, levels, args, complain); break; default: gcc_unreachable (); } return r; } case TREE_LIST: { tree purpose, value, chain; if (t == void_list_node) return t; if ((TREE_PURPOSE (t) && PACK_EXPANSION_P (TREE_PURPOSE (t))) || (TREE_VALUE (t) && PACK_EXPANSION_P (TREE_VALUE (t)))) { /* We have pack expansions, so expand those and create a new list out of it. */ /* Expand the argument expressions. */ tree purposevec = NULL_TREE; if (TREE_PURPOSE (t)) purposevec = tsubst_pack_expansion (TREE_PURPOSE (t), args, complain, in_decl); if (purposevec == error_mark_node) return error_mark_node; tree valuevec = NULL_TREE; if (TREE_VALUE (t)) valuevec = tsubst_pack_expansion (TREE_VALUE (t), args, complain, in_decl); if (valuevec == error_mark_node) return error_mark_node; /* Build the rest of the list. */ tree chain = TREE_CHAIN (t); if (chain && chain != void_type_node) chain = tsubst (chain, args, complain, in_decl); if (chain == error_mark_node) return error_mark_node; /* Determine the number of arguments. */ int len = -1; if (purposevec && TREE_CODE (purposevec) == TREE_VEC) { len = TREE_VEC_LENGTH (purposevec); gcc_assert (!valuevec || len == TREE_VEC_LENGTH (valuevec)); } else if (TREE_CODE (valuevec) == TREE_VEC) len = TREE_VEC_LENGTH (valuevec); else { /* Since we only performed a partial substitution into the argument pack, we only RETURN (a single list node. 
*/ if (purposevec == TREE_PURPOSE (t) && valuevec == TREE_VALUE (t) && chain == TREE_CHAIN (t)) return t; return tree_cons (purposevec, valuevec, chain); } /* Convert the argument vectors into a TREE_LIST. */ for (int i = len; i-- > 0; ) { purpose = (purposevec ? TREE_VEC_ELT (purposevec, i) : NULL_TREE); value = (valuevec ? TREE_VEC_ELT (valuevec, i) : NULL_TREE); /* Build the list (backwards). */ chain = hash_tree_cons (purpose, value, chain); } return chain; } purpose = TREE_PURPOSE (t); if (purpose) { purpose = tsubst (purpose, args, complain, in_decl); if (purpose == error_mark_node) return error_mark_node; } value = TREE_VALUE (t); if (value) { value = tsubst (value, args, complain, in_decl); if (value == error_mark_node) return error_mark_node; } chain = TREE_CHAIN (t); if (chain && chain != void_type_node) { chain = tsubst (chain, args, complain, in_decl); if (chain == error_mark_node) return error_mark_node; } if (purpose == TREE_PURPOSE (t) && value == TREE_VALUE (t) && chain == TREE_CHAIN (t)) return t; return hash_tree_cons (purpose, value, chain); } case TREE_BINFO: /* We should never be tsubsting a binfo. */ gcc_unreachable (); case TREE_VEC: /* A vector of template arguments. */ gcc_assert (!type); return tsubst_template_args (t, args, complain, in_decl); case POINTER_TYPE: case REFERENCE_TYPE: { if (type == TREE_TYPE (t) && TREE_CODE (type) != METHOD_TYPE) return t; /* [temp.deduct] Type deduction may fail for any of the following reasons: -- Attempting to create a pointer to reference type. -- Attempting to create a reference to a reference type or a reference to void. Core issue 106 says that creating a reference to a reference during instantiation is no longer a cause for failure. We only enforce this check in strict C++98 mode. */ if ((TYPE_REF_P (type) && (((cxx_dialect == cxx98) && flag_iso) || code != REFERENCE_TYPE)) || (code == REFERENCE_TYPE && VOID_TYPE_P (type))) { static location_t last_loc; /* We keep track of the last time we issued this error message to avoid spewing a ton of messages during a single bad template instantiation. 
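   As an illustrative sketch (hypothetical code),

     template <class T> void f (T &);

   with an explicit T = void would attempt to form 'void &' here, and a
   single bad instantiation can reach this point repeatedly, hence the
   last_loc guard.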
*/ if (complain & tf_error && last_loc != input_location) { if (VOID_TYPE_P (type)) error ("forming reference to void"); else if (code == POINTER_TYPE) error ("forming pointer to reference type %qT", type); else error ("forming reference to reference type %qT", type); last_loc = input_location; } return error_mark_node; } else if (TREE_CODE (type) == FUNCTION_TYPE && (type_memfn_quals (type) != TYPE_UNQUALIFIED || type_memfn_rqual (type) != REF_QUAL_NONE)) { if (complain & tf_error) { if (code == POINTER_TYPE) error ("forming pointer to qualified function type %qT", type); else error ("forming reference to qualified function type %qT", type); } return error_mark_node; } else if (code == POINTER_TYPE) { r = build_pointer_type (type); if (TREE_CODE (type) == METHOD_TYPE) r = build_ptrmemfunc_type (r); } else if (TYPE_REF_P (type)) /* In C++0x, during template argument substitution, when there is an attempt to create a reference to a reference type, reference collapsing is applied as described in [14.3.1/4 temp.arg.type]: "If a template-argument for a template-parameter T names a type that is a reference to a type A, an attempt to create the type 'lvalue reference to cv T' creates the type 'lvalue reference to A,' while an attempt to create the type type rvalue reference to cv T' creates the type T" */ r = cp_build_reference_type (TREE_TYPE (type), TYPE_REF_IS_RVALUE (t) && TYPE_REF_IS_RVALUE (type)); else r = cp_build_reference_type (type, TYPE_REF_IS_RVALUE (t)); r = cp_build_qualified_type_real (r, cp_type_quals (t), complain); if (r != error_mark_node) /* Will this ever be needed for TYPE_..._TO values? */ layout_type (r); return r; } case OFFSET_TYPE: { r = tsubst (TYPE_OFFSET_BASETYPE (t), args, complain, in_decl); if (r == error_mark_node || !MAYBE_CLASS_TYPE_P (r)) { /* [temp.deduct] Type deduction may fail for any of the following reasons: -- Attempting to create "pointer to member of T" when T is not a class type. */ if (complain & tf_error) error ("creating pointer to member of non-class type %qT", r); return error_mark_node; } if (TYPE_REF_P (type)) { if (complain & tf_error) error ("creating pointer to member reference type %qT", type); return error_mark_node; } if (VOID_TYPE_P (type)) { if (complain & tf_error) error ("creating pointer to member of type void"); return error_mark_node; } gcc_assert (TREE_CODE (type) != METHOD_TYPE); if (TREE_CODE (type) == FUNCTION_TYPE) { /* The type of the implicit object parameter gets its cv-qualifiers from the FUNCTION_TYPE. */ tree memptr; tree method_type = build_memfn_type (type, r, type_memfn_quals (type), type_memfn_rqual (type)); memptr = build_ptrmemfunc_type (build_pointer_type (method_type)); return cp_build_qualified_type_real (memptr, cp_type_quals (t), complain); } else return cp_build_qualified_type_real (build_ptrmem_type (r, type), cp_type_quals (t), complain); } case FUNCTION_TYPE: case METHOD_TYPE: { tree fntype; tree specs; fntype = tsubst_function_type (t, args, complain, in_decl); if (fntype == error_mark_node) return error_mark_node; /* Substitute the exception specification. 
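   As an illustrative sketch (hypothetical code), for

     template <class T> void f () noexcept (noexcept (T ()));

   the noexcept operand depends on T and is substituted, or deferred
   until it is actually needed, separately from the rest of the
   function type.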
*/ specs = tsubst_exception_specification (t, args, complain, in_decl, /*defer_ok*/fndecl_type); if (specs == error_mark_node) return error_mark_node; if (specs) fntype = build_exception_variant (fntype, specs); return fntype; } case ARRAY_TYPE: { tree domain = tsubst (TYPE_DOMAIN (t), args, complain, in_decl); if (domain == error_mark_node) return error_mark_node; /* As an optimization, we avoid regenerating the array type if it will obviously be the same as T. */ if (type == TREE_TYPE (t) && domain == TYPE_DOMAIN (t)) return t; /* These checks should match the ones in create_array_type_for_decl. [temp.deduct] The deduction may fail for any of the following reasons: -- Attempting to create an array with an element type that is void, a function type, or a reference type, or [DR337] an abstract class type. */ if (VOID_TYPE_P (type) || TREE_CODE (type) == FUNCTION_TYPE || (TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) == NULL_TREE) || TYPE_REF_P (type)) { if (complain & tf_error) error ("creating array of %qT", type); return error_mark_node; } if (!verify_type_context (input_location, TCTX_ARRAY_ELEMENT, type, !(complain & tf_error))) return error_mark_node; if (abstract_virtuals_error_sfinae (ACU_ARRAY, type, complain)) return error_mark_node; r = build_cplus_array_type (type, domain); if (!valid_array_size_p (input_location, r, in_decl, (complain & tf_error))) return error_mark_node; if (TYPE_USER_ALIGN (t)) { SET_TYPE_ALIGN (r, TYPE_ALIGN (t)); TYPE_USER_ALIGN (r) = 1; } return r; } case TYPENAME_TYPE: { tree ctx = TYPE_CONTEXT (t); if (TREE_CODE (ctx) == TYPE_PACK_EXPANSION) { ctx = tsubst_pack_expansion (ctx, args, complain, in_decl); if (ctx == error_mark_node || TREE_VEC_LENGTH (ctx) > 1) return error_mark_node; if (TREE_VEC_LENGTH (ctx) == 0) { if (complain & tf_error) error ("%qD is instantiated for an empty pack", TYPENAME_TYPE_FULLNAME (t)); return error_mark_node; } ctx = TREE_VEC_ELT (ctx, 0); } else ctx = tsubst_aggr_type (ctx, args, complain, in_decl, /*entering_scope=*/1); if (ctx == error_mark_node) return error_mark_node; tree f = tsubst_copy (TYPENAME_TYPE_FULLNAME (t), args, complain, in_decl); if (f == error_mark_node) return error_mark_node; if (!MAYBE_CLASS_TYPE_P (ctx)) { if (complain & tf_error) error ("%qT is not a class, struct, or union type", ctx); return error_mark_node; } else if (!uses_template_parms (ctx) && !TYPE_BEING_DEFINED (ctx)) { /* Normally, make_typename_type does not require that the CTX have complete type in order to allow things like: template <class T> struct S { typename S<T>::X Y; }; But, such constructs have already been resolved by this point, so here CTX really should have complete type, unless it's a partial instantiation. 
*/ ctx = complete_type (ctx); if (!COMPLETE_TYPE_P (ctx)) { if (complain & tf_error) cxx_incomplete_type_error (NULL_TREE, ctx); return error_mark_node; } } f = make_typename_type (ctx, f, typename_type, complain | tf_keep_type_decl); if (f == error_mark_node) return f; if (TREE_CODE (f) == TYPE_DECL) { complain |= tf_ignore_bad_quals; f = TREE_TYPE (f); } if (TREE_CODE (f) != TYPENAME_TYPE) { if (TYPENAME_IS_ENUM_P (t) && TREE_CODE (f) != ENUMERAL_TYPE) { if (complain & tf_error) error ("%qT resolves to %qT, which is not an enumeration type", t, f); else return error_mark_node; } else if (TYPENAME_IS_CLASS_P (t) && !CLASS_TYPE_P (f)) { if (complain & tf_error) error ("%qT resolves to %qT, which is not a class type", t, f); else return error_mark_node; } } return cp_build_qualified_type_real (f, cp_type_quals (f) | cp_type_quals (t), complain); } case UNBOUND_CLASS_TEMPLATE: { tree ctx = tsubst_aggr_type (TYPE_CONTEXT (t), args, complain, in_decl, /*entering_scope=*/1); tree name = TYPE_IDENTIFIER (t); tree parm_list = DECL_TEMPLATE_PARMS (TYPE_NAME (t)); if (ctx == error_mark_node || name == error_mark_node) return error_mark_node; if (parm_list) parm_list = tsubst_template_parms (parm_list, args, complain); return make_unbound_class_template (ctx, name, parm_list, complain); } case TYPEOF_TYPE: { tree type; ++cp_unevaluated_operand; ++c_inhibit_evaluation_warnings; type = tsubst_expr (TYPEOF_TYPE_EXPR (t), args, complain, in_decl, /*integral_constant_expression_p=*/false); --cp_unevaluated_operand; --c_inhibit_evaluation_warnings; type = finish_typeof (type); return cp_build_qualified_type_real (type, cp_type_quals (t) | cp_type_quals (type), complain); } case DECLTYPE_TYPE: { tree type; ++cp_unevaluated_operand; ++c_inhibit_evaluation_warnings; type = tsubst_copy_and_build (DECLTYPE_TYPE_EXPR (t), args, complain|tf_decltype, in_decl, /*function_p*/false, /*integral_constant_expression*/false); --cp_unevaluated_operand; --c_inhibit_evaluation_warnings; if (DECLTYPE_FOR_LAMBDA_CAPTURE (t)) type = lambda_capture_field_type (type, false /*explicit_init*/, DECLTYPE_FOR_REF_CAPTURE (t)); else if (DECLTYPE_FOR_LAMBDA_PROXY (t)) type = lambda_proxy_type (type); else { bool id = DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P (t); if (id && TREE_CODE (DECLTYPE_TYPE_EXPR (t)) == BIT_NOT_EXPR && EXPR_P (type)) /* In a template ~id could be either a complement expression or an unqualified-id naming a destructor; if instantiating it produces an expression, it's not an id-expression or member access. */ id = false; type = finish_decltype_type (type, id, complain); } return cp_build_qualified_type_real (type, cp_type_quals (t) | cp_type_quals (type), complain | tf_ignore_bad_quals); } case UNDERLYING_TYPE: { tree type = tsubst (UNDERLYING_TYPE_TYPE (t), args, complain, in_decl); return finish_underlying_type (type); } case TYPE_ARGUMENT_PACK: case NONTYPE_ARGUMENT_PACK: { tree r; if (code == NONTYPE_ARGUMENT_PACK) r = make_node (code); else r = cxx_make_type (code); tree pack_args = ARGUMENT_PACK_ARGS (t); pack_args = tsubst_template_args (pack_args, args, complain, in_decl); SET_ARGUMENT_PACK_ARGS (r, pack_args); return r; } case VOID_CST: case INTEGER_CST: case REAL_CST: case STRING_CST: case PLUS_EXPR: case MINUS_EXPR: case NEGATE_EXPR: case NOP_EXPR: case INDIRECT_REF: case ADDR_EXPR: case CALL_EXPR: case ARRAY_REF: case SCOPE_REF: /* We should use one of the expression tsubsts for these codes. 
*/ gcc_unreachable (); default: sorry ("use of %qs in template", get_tree_code_name (code)); return error_mark_node; } } /* tsubst a BASELINK. OBJECT_TYPE, if non-NULL, is the type of the expression on the left-hand side of the "." or "->" operator. We only do the lookup if we had a dependent BASELINK. Otherwise we adjust it onto the instantiated heirarchy. */ static tree tsubst_baselink (tree baselink, tree object_type, tree args, tsubst_flags_t complain, tree in_decl) { bool qualified_p = BASELINK_QUALIFIED_P (baselink); tree qualifying_scope = BINFO_TYPE (BASELINK_ACCESS_BINFO (baselink)); qualifying_scope = tsubst (qualifying_scope, args, complain, in_decl); tree optype = BASELINK_OPTYPE (baselink); optype = tsubst (optype, args, complain, in_decl); tree template_args = NULL_TREE; bool template_id_p = false; tree fns = BASELINK_FUNCTIONS (baselink); if (TREE_CODE (fns) == TEMPLATE_ID_EXPR) { template_id_p = true; template_args = TREE_OPERAND (fns, 1); fns = TREE_OPERAND (fns, 0); if (template_args) template_args = tsubst_template_args (template_args, args, complain, in_decl); } tree binfo_type = BINFO_TYPE (BASELINK_BINFO (baselink)); binfo_type = tsubst (binfo_type, args, complain, in_decl); bool dependent_p = binfo_type != BINFO_TYPE (BASELINK_BINFO (baselink)); if (dependent_p) { tree name = OVL_NAME (fns); if (IDENTIFIER_CONV_OP_P (name)) name = make_conv_op_name (optype); if (name == complete_dtor_identifier) /* Treat as-if non-dependent below. */ dependent_p = false; baselink = lookup_fnfields (qualifying_scope, name, /*protect=*/1); if (!baselink) { if ((complain & tf_error) && constructor_name_p (name, qualifying_scope)) error ("cannot call constructor %<%T::%D%> directly", qualifying_scope, name); return error_mark_node; } if (BASELINK_P (baselink)) fns = BASELINK_FUNCTIONS (baselink); } else /* We're going to overwrite pieces below, make a duplicate. */ baselink = copy_node (baselink); /* If lookup found a single function, mark it as used at this point. (If lookup found multiple functions the one selected later by overload resolution will be marked as used at that point.) */ if (!template_id_p && !really_overloaded_fn (fns)) { tree fn = OVL_FIRST (fns); bool ok = mark_used (fn, complain); if (!ok && !(complain & tf_error)) return error_mark_node; if (ok && BASELINK_P (baselink)) /* We might have instantiated an auto function. */ TREE_TYPE (baselink) = TREE_TYPE (fn); } if (BASELINK_P (baselink)) { /* Add back the template arguments, if present. */ if (template_id_p) BASELINK_FUNCTIONS (baselink) = build2 (TEMPLATE_ID_EXPR, unknown_type_node, fns, template_args); /* Update the conversion operator type. */ BASELINK_OPTYPE (baselink) = optype; } if (!object_type) object_type = current_class_type; if (qualified_p || !dependent_p) { baselink = adjust_result_of_qualified_name_lookup (baselink, qualifying_scope, object_type); if (!qualified_p) /* We need to call adjust_result_of_qualified_name_lookup in case the destructor names a base class, but we unset BASELINK_QUALIFIED_P so that we still get virtual function binding. */ BASELINK_QUALIFIED_P (baselink) = false; } return baselink; } /* Like tsubst_expr for a SCOPE_REF, given by QUALIFIED_ID. DONE is true if the qualified-id will be a postfix-expression in-and-of itself; false if more of the postfix-expression follows the QUALIFIED_ID. ADDRESS_P is true if the qualified-id is the operand of "&". 
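   As an illustrative sketch (hypothetical code), in

     template <class T> int f () { return T::value; }

   the qualified-id T::value is a SCOPE_REF; its scope must be
   substituted and the name looked up again in the resulting class
   before the reference can be finished.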
*/ static tree tsubst_qualified_id (tree qualified_id, tree args, tsubst_flags_t complain, tree in_decl, bool done, bool address_p) { tree expr; tree scope; tree name; bool is_template; tree template_args; location_t loc = UNKNOWN_LOCATION; gcc_assert (TREE_CODE (qualified_id) == SCOPE_REF); /* Figure out what name to look up. */ name = TREE_OPERAND (qualified_id, 1); if (TREE_CODE (name) == TEMPLATE_ID_EXPR) { is_template = true; loc = EXPR_LOCATION (name); template_args = TREE_OPERAND (name, 1); if (template_args) template_args = tsubst_template_args (template_args, args, complain, in_decl); if (template_args == error_mark_node) return error_mark_node; name = TREE_OPERAND (name, 0); } else { is_template = false; template_args = NULL_TREE; } /* Substitute into the qualifying scope. When there are no ARGS, we are just trying to simplify a non-dependent expression. In that case the qualifying scope may be dependent, and, in any case, substituting will not help. */ scope = TREE_OPERAND (qualified_id, 0); if (args) { scope = tsubst (scope, args, complain, in_decl); expr = tsubst_copy (name, args, complain, in_decl); } else expr = name; if (dependent_scope_p (scope)) { if (is_template) expr = build_min_nt_loc (loc, TEMPLATE_ID_EXPR, expr, template_args); tree r = build_qualified_name (NULL_TREE, scope, expr, QUALIFIED_NAME_IS_TEMPLATE (qualified_id)); REF_PARENTHESIZED_P (r) = REF_PARENTHESIZED_P (qualified_id); return r; } if (!BASELINK_P (name) && !DECL_P (expr)) { if (TREE_CODE (expr) == BIT_NOT_EXPR) { /* A BIT_NOT_EXPR is used to represent a destructor. */ if (!check_dtor_name (scope, TREE_OPERAND (expr, 0))) { error ("qualifying type %qT does not match destructor name ~%qT", scope, TREE_OPERAND (expr, 0)); expr = error_mark_node; } else expr = lookup_qualified_name (scope, complete_dtor_identifier, /*is_type_p=*/0, false); } else expr = lookup_qualified_name (scope, expr, /*is_type_p=*/0, false); if (TREE_CODE (TREE_CODE (expr) == TEMPLATE_DECL ? DECL_TEMPLATE_RESULT (expr) : expr) == TYPE_DECL) { if (complain & tf_error) { error ("dependent-name %qE is parsed as a non-type, but " "instantiation yields a type", qualified_id); inform (input_location, "say %<typename %E%> if a type is meant", qualified_id); } return error_mark_node; } } if (DECL_P (expr)) { check_accessibility_of_qualified_id (expr, /*object_type=*/NULL_TREE, scope); /* Remember that there was a reference to this entity. */ if (!mark_used (expr, complain) && !(complain & tf_error)) return error_mark_node; } if (expr == error_mark_node || TREE_CODE (expr) == TREE_LIST) { if (complain & tf_error) qualified_name_lookup_error (scope, TREE_OPERAND (qualified_id, 1), expr, input_location); return error_mark_node; } if (is_template) { /* We may be repeating a check already done during parsing, but if it was well-formed and passed then, it will pass again now, and if it didn't, we wouldn't have got here. The case we want to catch is when we couldn't tell then, and can now, namely when templ prior to substitution was an identifier. 
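   As an illustrative sketch (hypothetical code):

     template <class T> struct X { template <class U> static U m; };
     template <class T> int g () { return X<T>::template m<int>; }

   Here the terminal name is a template-id; after substitution it may
   turn out to name a variable template, which is resolved just below.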
*/ if (flag_concepts && check_auto_in_tmpl_args (expr, template_args)) return error_mark_node; if (variable_template_p (expr)) expr = lookup_and_finish_template_variable (expr, template_args, complain); else expr = lookup_template_function (expr, template_args); } if (expr == error_mark_node && complain & tf_error) qualified_name_lookup_error (scope, TREE_OPERAND (qualified_id, 1), expr, input_location); else if (TYPE_P (scope)) { expr = (adjust_result_of_qualified_name_lookup (expr, scope, current_nonlambda_class_type ())); expr = (finish_qualified_id_expr (scope, expr, done, address_p && PTRMEM_OK_P (qualified_id), QUALIFIED_NAME_IS_TEMPLATE (qualified_id), /*template_arg_p=*/false, complain)); } /* Expressions do not generally have reference type. */ if (TREE_CODE (expr) != SCOPE_REF /* However, if we're about to form a pointer-to-member, we just want the referenced member referenced. */ && TREE_CODE (expr) != OFFSET_REF) expr = convert_from_reference (expr); if (REF_PARENTHESIZED_P (qualified_id)) expr = force_paren_expr (expr); return expr; } /* tsubst the initializer for a VAR_DECL. INIT is the unsubstituted initializer, DECL is the substituted VAR_DECL. Other arguments are as for tsubst. */ static tree tsubst_init (tree init, tree decl, tree args, tsubst_flags_t complain, tree in_decl) { if (!init) return NULL_TREE; init = tsubst_expr (init, args, complain, in_decl, false); tree type = TREE_TYPE (decl); if (!init && type != error_mark_node) { if (tree auto_node = type_uses_auto (type)) { if (!CLASS_PLACEHOLDER_TEMPLATE (auto_node)) { if (complain & tf_error) error ("initializer for %q#D expands to an empty list " "of expressions", decl); return error_mark_node; } } else if (!dependent_type_p (type)) { /* If we had an initializer but it instantiated to nothing, value-initialize the object. This will only occur when the initializer was a pack expansion where the parameter packs used in that expansion were of length zero. */ init = build_value_init (type, complain); if (TREE_CODE (init) == AGGR_INIT_EXPR) init = get_target_expr_sfinae (init, complain); if (TREE_CODE (init) == TARGET_EXPR) TARGET_EXPR_DIRECT_INIT_P (init) = true; } } return init; } /* If T is a reference to a dependent member of the current instantiation C and we are trying to refer to that member in a partial instantiation of C, return a SCOPE_REF; otherwise, return NULL_TREE. This can happen when forming a C++20 alias template deduction guide, as in PR96199. */ static tree maybe_dependent_member_ref (tree t, tree args, tsubst_flags_t complain, tree in_decl) { if (cxx_dialect < cxx2a) return NULL_TREE; tree ctx = context_for_name_lookup (t); if (!CLASS_TYPE_P (ctx)) return NULL_TREE; ctx = tsubst (ctx, args, complain, in_decl); if (dependent_scope_p (ctx)) return build_qualified_name (NULL_TREE, ctx, DECL_NAME (t), /*template_p=*/false); return NULL_TREE; } /* Like tsubst, but deals with expressions. This function just replaces template parms; to finish processing the resultant expression, use tsubst_copy_and_build or tsubst_expr. */ static tree tsubst_copy (tree t, tree args, tsubst_flags_t complain, tree in_decl) { enum tree_code code; tree r; if (t == NULL_TREE || t == error_mark_node || args == NULL_TREE) return t; code = TREE_CODE (t); switch (code) { case PARM_DECL: r = retrieve_local_specialization (t); if (r == NULL_TREE) { /* We get here for a use of 'this' in an NSDMI. 
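   As an illustrative sketch (hypothetical code):

     template <class T> struct A { A *self = this; };

   While substituting the NSDMI there is no PARM_DECL for 'this'
   registered as a local specialization, so we hand back
   current_class_ptr instead.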
*/ if (DECL_NAME (t) == this_identifier && current_class_ptr) return current_class_ptr; /* This can happen for a parameter name used later in a function declaration (such as in a late-specified return type). Just make a dummy decl, since it's only used for its type. */ gcc_assert (cp_unevaluated_operand != 0); r = tsubst_decl (t, args, complain); /* Give it the template pattern as its context; its true context hasn't been instantiated yet and this is good enough for mangling. */ DECL_CONTEXT (r) = DECL_CONTEXT (t); } if (TREE_CODE (r) == ARGUMENT_PACK_SELECT) r = argument_pack_select_arg (r); if (!mark_used (r, complain) && !(complain & tf_error)) return error_mark_node; return r; case CONST_DECL: { tree enum_type; tree v; if (DECL_TEMPLATE_PARM_P (t)) return tsubst_copy (DECL_INITIAL (t), args, complain, in_decl); /* There is no need to substitute into namespace-scope enumerators. */ if (DECL_NAMESPACE_SCOPE_P (t)) return t; /* If ARGS is NULL, then T is known to be non-dependent. */ if (args == NULL_TREE) return scalar_constant_value (t); if (tree ref = maybe_dependent_member_ref (t, args, complain, in_decl)) return ref; /* Unfortunately, we cannot just call lookup_name here. Consider: template <int I> int f() { enum E { a = I }; struct S { void g() { E e = a; } }; }; When we instantiate f<7>::S::g(), say, lookup_name is not clever enough to find f<7>::a. */ enum_type = tsubst_aggr_type (DECL_CONTEXT (t), args, complain, in_decl, /*entering_scope=*/0); for (v = TYPE_VALUES (enum_type); v != NULL_TREE; v = TREE_CHAIN (v)) if (TREE_PURPOSE (v) == DECL_NAME (t)) return TREE_VALUE (v); /* We didn't find the name. That should never happen; if name-lookup found it during preliminary parsing, we should find it again here during instantiation. */ gcc_unreachable (); } return t; case FIELD_DECL: if (DECL_CONTEXT (t)) { tree ctx; ctx = tsubst_aggr_type (DECL_CONTEXT (t), args, complain, in_decl, /*entering_scope=*/1); if (ctx != DECL_CONTEXT (t)) { tree r = lookup_field (ctx, DECL_NAME (t), 0, false); if (!r) { if (complain & tf_error) error ("using invalid field %qD", t); return error_mark_node; } return r; } } return t; case VAR_DECL: if (tree ref = maybe_dependent_member_ref (t, args, complain, in_decl)) return ref; gcc_fallthrough(); case FUNCTION_DECL: if (DECL_LANG_SPECIFIC (t) && DECL_TEMPLATE_INFO (t)) r = tsubst (t, args, complain, in_decl); else if (local_variable_p (t) && uses_template_parms (DECL_CONTEXT (t))) { r = retrieve_local_specialization (t); if (r == NULL_TREE) { /* First try name lookup to find the instantiation. */ r = lookup_name (DECL_NAME (t)); if (r) { if (!VAR_P (r)) { /* During error-recovery we may find a non-variable, even an OVERLOAD: just bail out and avoid ICEs and duplicate diagnostics (c++/62207). */ gcc_assert (seen_error ()); return error_mark_node; } if (!is_capture_proxy (r)) { /* Make sure the one we found is the one we want. */ tree ctx = enclosing_instantiation_of (DECL_CONTEXT (t)); if (ctx != DECL_CONTEXT (r)) r = NULL_TREE; } } if (r) /* OK */; else { /* This can happen for a variable used in a late-specified return type of a local lambda, or for a local static or constant. Building a new VAR_DECL should be OK in all those cases. */ r = tsubst_decl (t, args, complain); if (local_specializations) /* Avoid infinite recursion (79640). */ register_local_specialization (r, t); if (decl_maybe_constant_var_p (r)) { /* We can't call cp_finish_decl, so handle the initializer by hand. 
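   As an illustrative sketch (hypothetical code) of the situation
   described above:

     template <class T> void f ()
     {
       constexpr T n = T ();
       auto l = [] () -> decltype (n) { return T (); };
     }

   When the lambda's operator() is instantiated, 'n' may have no local
   specialization yet, so a fresh VAR_DECL is built and its constant
   initializer is substituted and folded by hand here.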
*/ tree init = tsubst_init (DECL_INITIAL (t), r, args, complain, in_decl); if (!processing_template_decl) init = maybe_constant_init (init); if (processing_template_decl ? potential_constant_expression (init) : reduced_constant_expression_p (init)) DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (r) = TREE_CONSTANT (r) = true; DECL_INITIAL (r) = init; if (tree auto_node = type_uses_auto (TREE_TYPE (r))) TREE_TYPE (r) = do_auto_deduction (TREE_TYPE (r), init, auto_node, complain, adc_variable_type); } gcc_assert (cp_unevaluated_operand || TREE_STATIC (r) || decl_constant_var_p (r) || seen_error ()); if (!processing_template_decl && !TREE_STATIC (r)) r = process_outer_var_ref (r, complain); } /* Remember this for subsequent uses. */ if (local_specializations) register_local_specialization (r, t); } if (TREE_CODE (r) == ARGUMENT_PACK_SELECT) r = argument_pack_select_arg (r); } else r = t; if (!mark_used (r, complain)) return error_mark_node; return r; case NAMESPACE_DECL: return t; case OVERLOAD: return t; case BASELINK: return tsubst_baselink (t, current_nonlambda_class_type (), args, complain, in_decl); case TEMPLATE_DECL: if (DECL_TEMPLATE_TEMPLATE_PARM_P (t)) return tsubst (TREE_TYPE (DECL_TEMPLATE_RESULT (t)), args, complain, in_decl); else if (DECL_FUNCTION_TEMPLATE_P (t) && DECL_MEMBER_TEMPLATE_P (t)) return tsubst (t, args, complain, in_decl); else if (DECL_CLASS_SCOPE_P (t) && uses_template_parms (DECL_CONTEXT (t))) { /* Template template argument like the following example need special treatment: template <template <class> class TT> struct C {}; template <class T> struct D { template <class U> struct E {}; C<E> c; // #1 }; D<int> d; // #2 We are processing the template argument `E' in #1 for the template instantiation #2. Originally, `E' is a TEMPLATE_DECL with `D<T>' as its DECL_CONTEXT. Now we have to substitute this with one having context `D<int>'. */ tree context = tsubst (DECL_CONTEXT (t), args, complain, in_decl); if (dependent_scope_p (context)) { /* When rewriting a constructor into a deduction guide, a non-dependent name can become dependent, so memtmpl<args> becomes context::template memtmpl<args>. */ tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); return build_qualified_name (type, context, DECL_NAME (t), /*template*/true); } return lookup_field (context, DECL_NAME(t), 0, false); } else /* Ordinary template template argument. */ return t; case NON_LVALUE_EXPR: case VIEW_CONVERT_EXPR: { /* Handle location wrappers by substituting the wrapped node first, *then* reusing the resulting type. Doing the type first ensures that we handle template parameters and parameter pack expansions. */ if (location_wrapper_p (t)) { tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); return maybe_wrap_with_location (op0, EXPR_LOCATION (t)); } tree op = TREE_OPERAND (t, 0); if (code == VIEW_CONVERT_EXPR && TREE_CODE (op) == TEMPLATE_PARM_INDEX) { /* Wrapper to make a C++20 template parameter object const. */ op = tsubst_copy (op, args, complain, in_decl); if (TREE_CODE (op) == TEMPLATE_PARM_INDEX) { tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); return build1 (code, type, op); } else if (!CP_TYPE_CONST_P (TREE_TYPE (op))) { /* The template argument is not const, presumably because it is still dependent, and so not the const template parm object. 
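   As an illustrative sketch (hypothetical code, C++20 class-type
   non-type template parameter):

     struct P { int x; };
     template <P p> int f () { return p.x; }

   Uses of 'p' are wrapped in a VIEW_CONVERT_EXPR so the template
   parameter object is seen with a const-qualified type; during a
   partial substitution the argument may still be dependent and hence
   not yet const, which is the case handled below.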
*/ tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); gcc_checking_assert (same_type_ignoring_top_level_qualifiers_p (type, TREE_TYPE (op))); if (TREE_CODE (op) == CONSTRUCTOR || TREE_CODE (op) == IMPLICIT_CONV_EXPR) { /* Don't add a wrapper to these. */ op = copy_node (op); TREE_TYPE (op) = type; } else /* Do add a wrapper otherwise. */ op = build1 (code, type, op); } return op; } /* force_paren_expr can also create a VIEW_CONVERT_EXPR. */ else if (code == VIEW_CONVERT_EXPR && REF_PARENTHESIZED_P (t)) { op = tsubst_copy (op, args, complain, in_decl); op = build1 (code, TREE_TYPE (op), op); REF_PARENTHESIZED_P (op) = true; return op; } /* We shouldn't see any other uses of these in templates. */ gcc_unreachable (); } case CAST_EXPR: case REINTERPRET_CAST_EXPR: case CONST_CAST_EXPR: case STATIC_CAST_EXPR: case DYNAMIC_CAST_EXPR: case IMPLICIT_CONV_EXPR: case CONVERT_EXPR: case NOP_EXPR: { tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); return build1 (code, type, op0); } case SIZEOF_EXPR: if (PACK_EXPANSION_P (TREE_OPERAND (t, 0)) || ARGUMENT_PACK_P (TREE_OPERAND (t, 0))) { tree expanded, op = TREE_OPERAND (t, 0); int len = 0; if (SIZEOF_EXPR_TYPE_P (t)) op = TREE_TYPE (op); ++cp_unevaluated_operand; ++c_inhibit_evaluation_warnings; /* We only want to compute the number of arguments. */ if (PACK_EXPANSION_P (op)) expanded = tsubst_pack_expansion (op, args, complain, in_decl); else expanded = tsubst_template_args (ARGUMENT_PACK_ARGS (op), args, complain, in_decl); --cp_unevaluated_operand; --c_inhibit_evaluation_warnings; if (TREE_CODE (expanded) == TREE_VEC) { len = TREE_VEC_LENGTH (expanded); /* Set TREE_USED for the benefit of -Wunused. */ for (int i = 0; i < len; i++) if (DECL_P (TREE_VEC_ELT (expanded, i))) TREE_USED (TREE_VEC_ELT (expanded, i)) = true; } if (expanded == error_mark_node) return error_mark_node; else if (PACK_EXPANSION_P (expanded) || (TREE_CODE (expanded) == TREE_VEC && pack_expansion_args_count (expanded))) { if (PACK_EXPANSION_P (expanded)) /* OK. 
*/; else if (TREE_VEC_LENGTH (expanded) == 1) expanded = TREE_VEC_ELT (expanded, 0); else expanded = make_argument_pack (expanded); if (TYPE_P (expanded)) return cxx_sizeof_or_alignof_type (input_location, expanded, SIZEOF_EXPR, false, complain & tf_error); else return cxx_sizeof_or_alignof_expr (input_location, expanded, SIZEOF_EXPR, complain & tf_error); } else return build_int_cst (size_type_node, len); } if (SIZEOF_EXPR_TYPE_P (t)) { r = tsubst (TREE_TYPE (TREE_OPERAND (t, 0)), args, complain, in_decl); r = build1 (NOP_EXPR, r, error_mark_node); r = build1 (SIZEOF_EXPR, tsubst (TREE_TYPE (t), args, complain, in_decl), r); SIZEOF_EXPR_TYPE_P (r) = 1; return r; } /* Fall through */ case INDIRECT_REF: case NEGATE_EXPR: case TRUTH_NOT_EXPR: case BIT_NOT_EXPR: case ADDR_EXPR: case UNARY_PLUS_EXPR: /* Unary + */ case ALIGNOF_EXPR: case AT_ENCODE_EXPR: case ARROW_EXPR: case THROW_EXPR: case TYPEID_EXPR: case REALPART_EXPR: case IMAGPART_EXPR: case PAREN_EXPR: { tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); r = build1 (code, type, op0); if (code == ALIGNOF_EXPR) ALIGNOF_EXPR_STD_P (r) = ALIGNOF_EXPR_STD_P (t); return r; } case COMPONENT_REF: { tree object; tree name; object = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); name = TREE_OPERAND (t, 1); if (TREE_CODE (name) == BIT_NOT_EXPR) { name = tsubst_copy (TREE_OPERAND (name, 0), args, complain, in_decl); name = build1 (BIT_NOT_EXPR, NULL_TREE, name); } else if (TREE_CODE (name) == SCOPE_REF && TREE_CODE (TREE_OPERAND (name, 1)) == BIT_NOT_EXPR) { tree base = tsubst_copy (TREE_OPERAND (name, 0), args, complain, in_decl); name = TREE_OPERAND (name, 1); name = tsubst_copy (TREE_OPERAND (name, 0), args, complain, in_decl); name = build1 (BIT_NOT_EXPR, NULL_TREE, name); name = build_qualified_name (/*type=*/NULL_TREE, base, name, /*template_p=*/false); } else if (BASELINK_P (name)) name = tsubst_baselink (name, non_reference (TREE_TYPE (object)), args, complain, in_decl); else name = tsubst_copy (name, args, complain, in_decl); return build_nt (COMPONENT_REF, object, name, NULL_TREE); } case PLUS_EXPR: case MINUS_EXPR: case MULT_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case EXACT_DIV_EXPR: case BIT_AND_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: case TRUNC_MOD_EXPR: case FLOOR_MOD_EXPR: case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case RSHIFT_EXPR: case LSHIFT_EXPR: case EQ_EXPR: case NE_EXPR: case MAX_EXPR: case MIN_EXPR: case LE_EXPR: case GE_EXPR: case LT_EXPR: case GT_EXPR: case COMPOUND_EXPR: case DOTSTAR_EXPR: case MEMBER_REF: case PREDECREMENT_EXPR: case PREINCREMENT_EXPR: case POSTDECREMENT_EXPR: case POSTINCREMENT_EXPR: { tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); return build_nt (code, op0, op1); } case SCOPE_REF: { tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); return build_qualified_name (/*type=*/NULL_TREE, op0, op1, QUALIFIED_NAME_IS_TEMPLATE (t)); } case ARRAY_REF: { tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); return build_nt (ARRAY_REF, op0, op1, NULL_TREE, NULL_TREE); } case CALL_EXPR: { int n = VL_EXP_OPERAND_LENGTH (t); tree result = build_vl_exp 
(CALL_EXPR, n); int i; for (i = 0; i < n; i++) TREE_OPERAND (t, i) = tsubst_copy (TREE_OPERAND (t, i), args, complain, in_decl); return result; } case COND_EXPR: case MODOP_EXPR: case PSEUDO_DTOR_EXPR: case VEC_PERM_EXPR: { tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); tree op2 = tsubst_copy (TREE_OPERAND (t, 2), args, complain, in_decl); r = build_nt (code, op0, op1, op2); TREE_NO_WARNING (r) = TREE_NO_WARNING (t); return r; } case NEW_EXPR: { tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); tree op2 = tsubst_copy (TREE_OPERAND (t, 2), args, complain, in_decl); r = build_nt (code, op0, op1, op2); NEW_EXPR_USE_GLOBAL (r) = NEW_EXPR_USE_GLOBAL (t); return r; } case DELETE_EXPR: { tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); r = build_nt (code, op0, op1); DELETE_EXPR_USE_GLOBAL (r) = DELETE_EXPR_USE_GLOBAL (t); DELETE_EXPR_USE_VEC (r) = DELETE_EXPR_USE_VEC (t); return r; } case TEMPLATE_ID_EXPR: { /* Substituted template arguments */ tree fn = TREE_OPERAND (t, 0); tree targs = TREE_OPERAND (t, 1); fn = tsubst_copy (fn, args, complain, in_decl); if (targs) targs = tsubst_template_args (targs, args, complain, in_decl); return lookup_template_function (fn, targs); } case TREE_LIST: { tree purpose, value, chain; if (t == void_list_node) return t; purpose = TREE_PURPOSE (t); if (purpose) purpose = tsubst_copy (purpose, args, complain, in_decl); value = TREE_VALUE (t); if (value) value = tsubst_copy (value, args, complain, in_decl); chain = TREE_CHAIN (t); if (chain && chain != void_type_node) chain = tsubst_copy (chain, args, complain, in_decl); if (purpose == TREE_PURPOSE (t) && value == TREE_VALUE (t) && chain == TREE_CHAIN (t)) return t; return tree_cons (purpose, value, chain); } case RECORD_TYPE: case UNION_TYPE: case ENUMERAL_TYPE: case INTEGER_TYPE: case TEMPLATE_TYPE_PARM: case TEMPLATE_TEMPLATE_PARM: case BOUND_TEMPLATE_TEMPLATE_PARM: case TEMPLATE_PARM_INDEX: case POINTER_TYPE: case REFERENCE_TYPE: case OFFSET_TYPE: case FUNCTION_TYPE: case METHOD_TYPE: case ARRAY_TYPE: case TYPENAME_TYPE: case UNBOUND_CLASS_TEMPLATE: case TYPEOF_TYPE: case DECLTYPE_TYPE: case TYPE_DECL: return tsubst (t, args, complain, in_decl); case USING_DECL: t = DECL_NAME (t); /* Fall through. */ case IDENTIFIER_NODE: if (IDENTIFIER_CONV_OP_P (t)) { tree new_type = tsubst (TREE_TYPE (t), args, complain, in_decl); return make_conv_op_name (new_type); } else return t; case CONSTRUCTOR: /* This is handled by tsubst_copy_and_build. */ gcc_unreachable (); case VA_ARG_EXPR: { tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); return build_x_va_arg (EXPR_LOCATION (t), op0, type); } case CLEANUP_POINT_EXPR: /* We shouldn't have built any of these during initial template generation. Instead, they should be built during instantiation in response to the saved STMT_IS_FULL_EXPR_P setting. 
*/ gcc_unreachable (); case OFFSET_REF: { tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); r = build2 (code, type, op0, op1); PTRMEM_OK_P (r) = PTRMEM_OK_P (t); if (!mark_used (TREE_OPERAND (r, 1), complain) && !(complain & tf_error)) return error_mark_node; return r; } case EXPR_PACK_EXPANSION: error ("invalid use of pack expansion expression"); return error_mark_node; case NONTYPE_ARGUMENT_PACK: error ("use %<...%> to expand argument pack"); return error_mark_node; case VOID_CST: gcc_checking_assert (t == void_node && VOID_TYPE_P (TREE_TYPE (t))); return t; case INTEGER_CST: case REAL_CST: case COMPLEX_CST: { /* Instantiate any typedefs in the type. */ tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); r = fold_convert (type, t); gcc_assert (TREE_CODE (r) == code); return r; } case STRING_CST: { tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); r = t; if (type != TREE_TYPE (t)) { r = copy_node (t); TREE_TYPE (r) = type; } return r; } case PTRMEM_CST: /* These can sometimes show up in a partial instantiation, but never involve template parms. */ gcc_assert (!uses_template_parms (t)); return t; case UNARY_LEFT_FOLD_EXPR: return tsubst_unary_left_fold (t, args, complain, in_decl); case UNARY_RIGHT_FOLD_EXPR: return tsubst_unary_right_fold (t, args, complain, in_decl); case BINARY_LEFT_FOLD_EXPR: return tsubst_binary_left_fold (t, args, complain, in_decl); case BINARY_RIGHT_FOLD_EXPR: return tsubst_binary_right_fold (t, args, complain, in_decl); case PREDICT_EXPR: return t; case DEBUG_BEGIN_STMT: /* ??? There's no point in copying it for now, but maybe some day it will contain more information, such as a pointer back to the containing function, inlined copy or so. */ return t; case CO_AWAIT_EXPR: return tsubst_expr (t, args, complain, in_decl, /*integral_constant_expression_p=*/false); break; default: /* We shouldn't get here, but keep going if !flag_checking. */ if (flag_checking) gcc_unreachable (); return t; } } /* Helper function for tsubst_omp_clauses, used for instantiation of OMP_CLAUSE_DECL of clauses. */ static tree tsubst_omp_clause_decl (tree decl, tree args, tsubst_flags_t complain, tree in_decl, tree *iterator_cache) { if (decl == NULL_TREE) return NULL_TREE; /* Handle OpenMP iterators. 
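
   A purely illustrative, hypothetical example (all names invented) of a
   clause using an OpenMP 5.0 iterator modifier inside a template, which is
   the TREE_VEC-in-TREE_PURPOSE shape handled just below:

     template <int N>
     void produce (int *p)
     {
     #pragma omp task depend (iterator (it = 0 : N), in : p[it])
       p[0] = 0;
     }

   The iterator's begin/end/step expressions can mention template
   parameters such as N, so they are substituted here at instantiation
   time.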
*/
  if (TREE_CODE (decl) == TREE_LIST && TREE_PURPOSE (decl)
      && TREE_CODE (TREE_PURPOSE (decl)) == TREE_VEC)
    {
      tree ret;
      if (iterator_cache[0] == TREE_PURPOSE (decl))
        ret = iterator_cache[1];
      else
        {
          tree *tp = &ret;
          begin_scope (sk_omp, NULL);
          for (tree it = TREE_PURPOSE (decl); it; it = TREE_CHAIN (it))
            {
              *tp = copy_node (it);
              TREE_VEC_ELT (*tp, 0)
                = tsubst_decl (TREE_VEC_ELT (it, 0), args, complain);
              TREE_VEC_ELT (*tp, 1)
                = tsubst_expr (TREE_VEC_ELT (it, 1), args, complain, in_decl,
                               /*integral_constant_expression_p=*/false);
              TREE_VEC_ELT (*tp, 2)
                = tsubst_expr (TREE_VEC_ELT (it, 2), args, complain, in_decl,
                               /*integral_constant_expression_p=*/false);
              TREE_VEC_ELT (*tp, 3)
                = tsubst_expr (TREE_VEC_ELT (it, 3), args, complain, in_decl,
                               /*integral_constant_expression_p=*/false);
              TREE_CHAIN (*tp) = NULL_TREE;
              tp = &TREE_CHAIN (*tp);
            }
          TREE_VEC_ELT (ret, 5) = poplevel (1, 1, 0);
          iterator_cache[0] = TREE_PURPOSE (decl);
          iterator_cache[1] = ret;
        }
      return build_tree_list (ret,
                              tsubst_omp_clause_decl (TREE_VALUE (decl),
                                                      args, complain, in_decl,
                                                      NULL));
    }

  /* Handle an OpenMP array section represented as a TREE_LIST (or
     OMP_CLAUSE_DEPEND_KIND).  An OMP_CLAUSE_DEPEND (with a depend kind of
     OMP_CLAUSE_DEPEND_SINK) can also be represented as a TREE_LIST.  We can
     handle it exactly the same as an array section (purpose, value, and a
     chain), even though the nomenclature (low_bound, length, etc) is
     different.  */
  if (TREE_CODE (decl) == TREE_LIST)
    {
      tree low_bound
        = tsubst_expr (TREE_PURPOSE (decl), args, complain, in_decl,
                       /*integral_constant_expression_p=*/false);
      tree length = tsubst_expr (TREE_VALUE (decl), args, complain, in_decl,
                                 /*integral_constant_expression_p=*/false);
      tree chain = tsubst_omp_clause_decl (TREE_CHAIN (decl), args, complain,
                                           in_decl, NULL);
      if (TREE_PURPOSE (decl) == low_bound
          && TREE_VALUE (decl) == length
          && TREE_CHAIN (decl) == chain)
        return decl;
      tree ret = tree_cons (low_bound, length, chain);
      OMP_CLAUSE_DEPEND_SINK_NEGATIVE (ret)
        = OMP_CLAUSE_DEPEND_SINK_NEGATIVE (decl);
      return ret;
    }
  tree ret = tsubst_expr (decl, args, complain, in_decl,
                          /*integral_constant_expression_p=*/false);
  /* Undo convert_from_reference tsubst_expr could have called.  */
  if (decl && REFERENCE_REF_P (ret) && !REFERENCE_REF_P (decl))
    ret = TREE_OPERAND (ret, 0);
  return ret;
}

/* Like tsubst_copy, but specifically for OpenMP clauses.
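
   As a purely illustrative, hypothetical sketch (names invented), the
   clause operands rewritten by this function are expressions such as the
   N and a below, which only become concrete once the enclosing template
   is instantiated:

     template <typename T, int N>
     void scale (T *a)
     {
     #pragma omp parallel for num_threads (N) firstprivate (a)
       for (int i = 0; i < N; i++)
         a[i] = a[i] + T (1);
     }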
*/ static tree tsubst_omp_clauses (tree clauses, enum c_omp_region_type ort, tree args, tsubst_flags_t complain, tree in_decl) { tree new_clauses = NULL_TREE, nc, oc; tree linear_no_step = NULL_TREE; tree iterator_cache[2] = { NULL_TREE, NULL_TREE }; for (oc = clauses; oc ; oc = OMP_CLAUSE_CHAIN (oc)) { nc = copy_node (oc); OMP_CLAUSE_CHAIN (nc) = new_clauses; new_clauses = nc; switch (OMP_CLAUSE_CODE (nc)) { case OMP_CLAUSE_LASTPRIVATE: if (OMP_CLAUSE_LASTPRIVATE_STMT (oc)) { OMP_CLAUSE_LASTPRIVATE_STMT (nc) = push_stmt_list (); tsubst_expr (OMP_CLAUSE_LASTPRIVATE_STMT (oc), args, complain, in_decl, /*integral_constant_expression_p=*/false); OMP_CLAUSE_LASTPRIVATE_STMT (nc) = pop_stmt_list (OMP_CLAUSE_LASTPRIVATE_STMT (nc)); } /* FALLTHRU */ case OMP_CLAUSE_PRIVATE: case OMP_CLAUSE_SHARED: case OMP_CLAUSE_FIRSTPRIVATE: case OMP_CLAUSE_COPYIN: case OMP_CLAUSE_COPYPRIVATE: case OMP_CLAUSE_UNIFORM: case OMP_CLAUSE_DEPEND: case OMP_CLAUSE_FROM: case OMP_CLAUSE_TO: case OMP_CLAUSE_MAP: case OMP_CLAUSE__CACHE_: case OMP_CLAUSE_NONTEMPORAL: case OMP_CLAUSE_USE_DEVICE_PTR: case OMP_CLAUSE_USE_DEVICE_ADDR: case OMP_CLAUSE_IS_DEVICE_PTR: case OMP_CLAUSE_INCLUSIVE: case OMP_CLAUSE_EXCLUSIVE: OMP_CLAUSE_DECL (nc) = tsubst_omp_clause_decl (OMP_CLAUSE_DECL (oc), args, complain, in_decl, iterator_cache); break; case OMP_CLAUSE_TILE: case OMP_CLAUSE_IF: case OMP_CLAUSE_NUM_THREADS: case OMP_CLAUSE_SCHEDULE: case OMP_CLAUSE_COLLAPSE: case OMP_CLAUSE_FINAL: case OMP_CLAUSE_DEVICE: case OMP_CLAUSE_DIST_SCHEDULE: case OMP_CLAUSE_NUM_TEAMS: case OMP_CLAUSE_THREAD_LIMIT: case OMP_CLAUSE_SAFELEN: case OMP_CLAUSE_SIMDLEN: case OMP_CLAUSE_NUM_TASKS: case OMP_CLAUSE_GRAINSIZE: case OMP_CLAUSE_PRIORITY: case OMP_CLAUSE_ORDERED: case OMP_CLAUSE_HINT: case OMP_CLAUSE_NUM_GANGS: case OMP_CLAUSE_NUM_WORKERS: case OMP_CLAUSE_VECTOR_LENGTH: case OMP_CLAUSE_WORKER: case OMP_CLAUSE_VECTOR: case OMP_CLAUSE_ASYNC: case OMP_CLAUSE_WAIT: OMP_CLAUSE_OPERAND (nc, 0) = tsubst_expr (OMP_CLAUSE_OPERAND (oc, 0), args, complain, in_decl, /*integral_constant_expression_p=*/false); break; case OMP_CLAUSE_REDUCTION: case OMP_CLAUSE_IN_REDUCTION: case OMP_CLAUSE_TASK_REDUCTION: if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (oc)) { tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (oc); if (TREE_CODE (placeholder) == SCOPE_REF) { tree scope = tsubst (TREE_OPERAND (placeholder, 0), args, complain, in_decl); OMP_CLAUSE_REDUCTION_PLACEHOLDER (nc) = build_qualified_name (NULL_TREE, scope, TREE_OPERAND (placeholder, 1), false); } else gcc_assert (identifier_p (placeholder)); } OMP_CLAUSE_DECL (nc) = tsubst_omp_clause_decl (OMP_CLAUSE_DECL (oc), args, complain, in_decl, NULL); break; case OMP_CLAUSE_GANG: case OMP_CLAUSE_ALIGNED: OMP_CLAUSE_DECL (nc) = tsubst_omp_clause_decl (OMP_CLAUSE_DECL (oc), args, complain, in_decl, NULL); OMP_CLAUSE_OPERAND (nc, 1) = tsubst_expr (OMP_CLAUSE_OPERAND (oc, 1), args, complain, in_decl, /*integral_constant_expression_p=*/false); break; case OMP_CLAUSE_LINEAR: OMP_CLAUSE_DECL (nc) = tsubst_omp_clause_decl (OMP_CLAUSE_DECL (oc), args, complain, in_decl, NULL); if (OMP_CLAUSE_LINEAR_STEP (oc) == NULL_TREE) { gcc_assert (!linear_no_step); linear_no_step = nc; } else if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (oc)) OMP_CLAUSE_LINEAR_STEP (nc) = tsubst_omp_clause_decl (OMP_CLAUSE_LINEAR_STEP (oc), args, complain, in_decl, NULL); else OMP_CLAUSE_LINEAR_STEP (nc) = tsubst_expr (OMP_CLAUSE_LINEAR_STEP (oc), args, complain, in_decl, /*integral_constant_expression_p=*/false); break; case OMP_CLAUSE_NOWAIT: case OMP_CLAUSE_DEFAULT: 
case OMP_CLAUSE_UNTIED: case OMP_CLAUSE_MERGEABLE: case OMP_CLAUSE_INBRANCH: case OMP_CLAUSE_NOTINBRANCH: case OMP_CLAUSE_PROC_BIND: case OMP_CLAUSE_FOR: case OMP_CLAUSE_PARALLEL: case OMP_CLAUSE_SECTIONS: case OMP_CLAUSE_TASKGROUP: case OMP_CLAUSE_NOGROUP: case OMP_CLAUSE_THREADS: case OMP_CLAUSE_SIMD: case OMP_CLAUSE_DEFAULTMAP: case OMP_CLAUSE_ORDER: case OMP_CLAUSE_BIND: case OMP_CLAUSE_INDEPENDENT: case OMP_CLAUSE_AUTO: case OMP_CLAUSE_SEQ: case OMP_CLAUSE_IF_PRESENT: case OMP_CLAUSE_FINALIZE: break; default: gcc_unreachable (); } if ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP) switch (OMP_CLAUSE_CODE (nc)) { case OMP_CLAUSE_SHARED: case OMP_CLAUSE_PRIVATE: case OMP_CLAUSE_FIRSTPRIVATE: case OMP_CLAUSE_LASTPRIVATE: case OMP_CLAUSE_COPYPRIVATE: case OMP_CLAUSE_LINEAR: case OMP_CLAUSE_REDUCTION: case OMP_CLAUSE_IN_REDUCTION: case OMP_CLAUSE_TASK_REDUCTION: case OMP_CLAUSE_USE_DEVICE_PTR: case OMP_CLAUSE_USE_DEVICE_ADDR: case OMP_CLAUSE_IS_DEVICE_PTR: case OMP_CLAUSE_INCLUSIVE: case OMP_CLAUSE_EXCLUSIVE: /* tsubst_expr on SCOPE_REF results in returning finish_non_static_data_member result. Undo that here. */ if (TREE_CODE (OMP_CLAUSE_DECL (oc)) == SCOPE_REF && (TREE_CODE (TREE_OPERAND (OMP_CLAUSE_DECL (oc), 1)) == IDENTIFIER_NODE)) { tree t = OMP_CLAUSE_DECL (nc); tree v = t; while (v) switch (TREE_CODE (v)) { case COMPONENT_REF: case MEM_REF: case INDIRECT_REF: CASE_CONVERT: case POINTER_PLUS_EXPR: v = TREE_OPERAND (v, 0); continue; case PARM_DECL: if (DECL_CONTEXT (v) == current_function_decl && DECL_ARTIFICIAL (v) && DECL_NAME (v) == this_identifier) OMP_CLAUSE_DECL (nc) = TREE_OPERAND (t, 1); /* FALLTHRU */ default: v = NULL_TREE; break; } } else if (VAR_P (OMP_CLAUSE_DECL (oc)) && DECL_HAS_VALUE_EXPR_P (OMP_CLAUSE_DECL (oc)) && DECL_ARTIFICIAL (OMP_CLAUSE_DECL (oc)) && DECL_LANG_SPECIFIC (OMP_CLAUSE_DECL (oc)) && DECL_OMP_PRIVATIZED_MEMBER (OMP_CLAUSE_DECL (oc))) { tree decl = OMP_CLAUSE_DECL (nc); if (VAR_P (decl)) { retrofit_lang_decl (decl); DECL_OMP_PRIVATIZED_MEMBER (decl) = 1; } } break; default: break; } } new_clauses = nreverse (new_clauses); if (ort != C_ORT_OMP_DECLARE_SIMD) { new_clauses = finish_omp_clauses (new_clauses, ort); if (linear_no_step) for (nc = new_clauses; nc; nc = OMP_CLAUSE_CHAIN (nc)) if (nc == linear_no_step) { OMP_CLAUSE_LINEAR_STEP (nc) = NULL_TREE; break; } } return new_clauses; } /* Like tsubst_copy_and_build, but unshare TREE_LIST nodes. */ static tree tsubst_copy_asm_operands (tree t, tree args, tsubst_flags_t complain, tree in_decl) { #define RECUR(t) tsubst_copy_asm_operands (t, args, complain, in_decl) tree purpose, value, chain; if (t == NULL) return t; if (TREE_CODE (t) != TREE_LIST) return tsubst_copy_and_build (t, args, complain, in_decl, /*function_p=*/false, /*integral_constant_expression_p=*/false); if (t == void_list_node) return t; purpose = TREE_PURPOSE (t); if (purpose) purpose = RECUR (purpose); value = TREE_VALUE (t); if (value) { if (TREE_CODE (value) != LABEL_DECL) value = RECUR (value); else { value = lookup_label (DECL_NAME (value)); gcc_assert (TREE_CODE (value) == LABEL_DECL); TREE_USED (value) = 1; } } chain = TREE_CHAIN (t); if (chain && chain != void_type_node) chain = RECUR (chain); return tree_cons (purpose, value, chain); #undef RECUR } /* Used to temporarily communicate the list of #pragma omp parallel clauses to #pragma omp for instantiation if they are combined together. 
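
   A purely illustrative, hypothetical example of such a combined
   construct:

     template <int N>
     void h ()
     {
       int i;
     #pragma omp parallel for lastprivate (i)
       for (i = 0; i < N; i++)
         ;
     }

   The parallel part is instantiated first; its not-yet-finished clause
   list is shared with the nested loop's instantiation through this
   pointer.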
*/ static tree *omp_parallel_combined_clauses; static tree tsubst_decomp_names (tree, tree, tree, tsubst_flags_t, tree, tree *, unsigned int *); /* Substitute one OMP_FOR iterator. */ static bool tsubst_omp_for_iterator (tree t, int i, tree declv, tree &orig_declv, tree initv, tree condv, tree incrv, tree *clauses, tree args, tsubst_flags_t complain, tree in_decl, bool integral_constant_expression_p) { #define RECUR(NODE) \ tsubst_expr ((NODE), args, complain, in_decl, \ integral_constant_expression_p) tree decl, init, cond = NULL_TREE, incr = NULL_TREE; bool ret = false; init = TREE_VEC_ELT (OMP_FOR_INIT (t), i); gcc_assert (TREE_CODE (init) == MODIFY_EXPR); decl = TREE_OPERAND (init, 0); init = TREE_OPERAND (init, 1); tree decl_expr = NULL_TREE; bool range_for = TREE_VEC_ELT (OMP_FOR_COND (t), i) == global_namespace; if (range_for) { bool decomp = false; if (decl != error_mark_node && DECL_HAS_VALUE_EXPR_P (decl)) { tree v = DECL_VALUE_EXPR (decl); if (TREE_CODE (v) == ARRAY_REF && VAR_P (TREE_OPERAND (v, 0)) && DECL_DECOMPOSITION_P (TREE_OPERAND (v, 0))) { tree decomp_first = NULL_TREE; unsigned decomp_cnt = 0; tree d = tsubst_decl (TREE_OPERAND (v, 0), args, complain); maybe_push_decl (d); d = tsubst_decomp_names (d, TREE_OPERAND (v, 0), args, complain, in_decl, &decomp_first, &decomp_cnt); decomp = true; if (d == error_mark_node) decl = error_mark_node; else for (unsigned int i = 0; i < decomp_cnt; i++) { if (!DECL_HAS_VALUE_EXPR_P (decomp_first)) { tree v = build_nt (ARRAY_REF, d, size_int (decomp_cnt - i - 1), NULL_TREE, NULL_TREE); SET_DECL_VALUE_EXPR (decomp_first, v); DECL_HAS_VALUE_EXPR_P (decomp_first) = 1; } fit_decomposition_lang_decl (decomp_first, d); decomp_first = DECL_CHAIN (decomp_first); } } } decl = tsubst_decl (decl, args, complain); if (!decomp) maybe_push_decl (decl); } else if (init && TREE_CODE (init) == DECL_EXPR) { /* We need to jump through some hoops to handle declarations in the init-statement, since we might need to handle auto deduction, but we need to keep control of initialization. 
*/ decl_expr = init; init = DECL_INITIAL (DECL_EXPR_DECL (init)); decl = tsubst_decl (decl, args, complain); } else { if (TREE_CODE (decl) == SCOPE_REF) { decl = RECUR (decl); if (TREE_CODE (decl) == COMPONENT_REF) { tree v = decl; while (v) switch (TREE_CODE (v)) { case COMPONENT_REF: case MEM_REF: case INDIRECT_REF: CASE_CONVERT: case POINTER_PLUS_EXPR: v = TREE_OPERAND (v, 0); continue; case PARM_DECL: if (DECL_CONTEXT (v) == current_function_decl && DECL_ARTIFICIAL (v) && DECL_NAME (v) == this_identifier) { decl = TREE_OPERAND (decl, 1); decl = omp_privatize_field (decl, false); } /* FALLTHRU */ default: v = NULL_TREE; break; } } } else decl = RECUR (decl); } init = RECUR (init); if (orig_declv && OMP_FOR_ORIG_DECLS (t)) { tree o = TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (t), i); if (TREE_CODE (o) == TREE_LIST) TREE_VEC_ELT (orig_declv, i) = tree_cons (RECUR (TREE_PURPOSE (o)), RECUR (TREE_VALUE (o)), NULL_TREE); else TREE_VEC_ELT (orig_declv, i) = RECUR (o); } if (range_for) { tree this_pre_body = NULL_TREE; tree orig_init = NULL_TREE; tree orig_decl = NULL_TREE; cp_convert_omp_range_for (this_pre_body, NULL, decl, orig_decl, init, orig_init, cond, incr); if (orig_decl) { if (orig_declv == NULL_TREE) orig_declv = copy_node (declv); TREE_VEC_ELT (orig_declv, i) = orig_decl; ret = true; } else if (orig_declv) TREE_VEC_ELT (orig_declv, i) = decl; } tree auto_node = type_uses_auto (TREE_TYPE (decl)); if (!range_for && auto_node && init) TREE_TYPE (decl) = do_auto_deduction (TREE_TYPE (decl), init, auto_node, complain); gcc_assert (!type_dependent_expression_p (decl)); if (!CLASS_TYPE_P (TREE_TYPE (decl)) || range_for) { if (decl_expr) { /* Declare the variable, but don't let that initialize it. */ tree init_sav = DECL_INITIAL (DECL_EXPR_DECL (decl_expr)); DECL_INITIAL (DECL_EXPR_DECL (decl_expr)) = NULL_TREE; RECUR (decl_expr); DECL_INITIAL (DECL_EXPR_DECL (decl_expr)) = init_sav; } if (!range_for) { cond = RECUR (TREE_VEC_ELT (OMP_FOR_COND (t), i)); incr = TREE_VEC_ELT (OMP_FOR_INCR (t), i); if (TREE_CODE (incr) == MODIFY_EXPR) { tree lhs = RECUR (TREE_OPERAND (incr, 0)); tree rhs = RECUR (TREE_OPERAND (incr, 1)); incr = build_x_modify_expr (EXPR_LOCATION (incr), lhs, NOP_EXPR, rhs, complain); } else incr = RECUR (incr); if (orig_declv && !OMP_FOR_ORIG_DECLS (t)) TREE_VEC_ELT (orig_declv, i) = decl; } TREE_VEC_ELT (declv, i) = decl; TREE_VEC_ELT (initv, i) = init; TREE_VEC_ELT (condv, i) = cond; TREE_VEC_ELT (incrv, i) = incr; return ret; } if (decl_expr) { /* Declare and initialize the variable. */ RECUR (decl_expr); init = NULL_TREE; } else if (init) { tree *pc; int j; for (j = ((omp_parallel_combined_clauses == NULL || TREE_CODE (t) == OMP_LOOP) ? 1 : 0); j < 2; j++) { for (pc = j ? clauses : omp_parallel_combined_clauses; *pc; ) { if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_PRIVATE && OMP_CLAUSE_DECL (*pc) == decl) break; else if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_LASTPRIVATE && OMP_CLAUSE_DECL (*pc) == decl) { if (j) break; /* Move lastprivate (decl) clause to OMP_FOR_CLAUSES. 
*/ tree c = *pc; *pc = OMP_CLAUSE_CHAIN (c); OMP_CLAUSE_CHAIN (c) = *clauses; *clauses = c; } else if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_FIRSTPRIVATE && OMP_CLAUSE_DECL (*pc) == decl) { error ("iteration variable %qD should not be firstprivate", decl); *pc = OMP_CLAUSE_CHAIN (*pc); } else if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_REDUCTION && OMP_CLAUSE_DECL (*pc) == decl) { error ("iteration variable %qD should not be reduction", decl); *pc = OMP_CLAUSE_CHAIN (*pc); } else pc = &OMP_CLAUSE_CHAIN (*pc); } if (*pc) break; } if (*pc == NULL_TREE) { tree c = build_omp_clause (input_location, TREE_CODE (t) == OMP_LOOP ? OMP_CLAUSE_LASTPRIVATE : OMP_CLAUSE_PRIVATE); OMP_CLAUSE_DECL (c) = decl; c = finish_omp_clauses (c, C_ORT_OMP); if (c) { OMP_CLAUSE_CHAIN (c) = *clauses; *clauses = c; } } } cond = TREE_VEC_ELT (OMP_FOR_COND (t), i); if (COMPARISON_CLASS_P (cond)) { tree op0 = RECUR (TREE_OPERAND (cond, 0)); tree op1 = RECUR (TREE_OPERAND (cond, 1)); cond = build2 (TREE_CODE (cond), boolean_type_node, op0, op1); } else cond = RECUR (cond); incr = TREE_VEC_ELT (OMP_FOR_INCR (t), i); switch (TREE_CODE (incr)) { case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: case POSTINCREMENT_EXPR: case POSTDECREMENT_EXPR: incr = build2 (TREE_CODE (incr), TREE_TYPE (decl), RECUR (TREE_OPERAND (incr, 0)), NULL_TREE); break; case MODIFY_EXPR: if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR || TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR) { tree rhs = TREE_OPERAND (incr, 1); tree lhs = RECUR (TREE_OPERAND (incr, 0)); tree rhs0 = RECUR (TREE_OPERAND (rhs, 0)); tree rhs1 = RECUR (TREE_OPERAND (rhs, 1)); incr = build2 (MODIFY_EXPR, TREE_TYPE (decl), lhs, build2 (TREE_CODE (rhs), TREE_TYPE (decl), rhs0, rhs1)); } else incr = RECUR (incr); break; case MODOP_EXPR: if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR || TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR) { tree lhs = RECUR (TREE_OPERAND (incr, 0)); incr = build2 (MODIFY_EXPR, TREE_TYPE (decl), lhs, build2 (TREE_CODE (TREE_OPERAND (incr, 1)), TREE_TYPE (decl), lhs, RECUR (TREE_OPERAND (incr, 2)))); } else if (TREE_CODE (TREE_OPERAND (incr, 1)) == NOP_EXPR && (TREE_CODE (TREE_OPERAND (incr, 2)) == PLUS_EXPR || (TREE_CODE (TREE_OPERAND (incr, 2)) == MINUS_EXPR))) { tree rhs = TREE_OPERAND (incr, 2); tree lhs = RECUR (TREE_OPERAND (incr, 0)); tree rhs0 = RECUR (TREE_OPERAND (rhs, 0)); tree rhs1 = RECUR (TREE_OPERAND (rhs, 1)); incr = build2 (MODIFY_EXPR, TREE_TYPE (decl), lhs, build2 (TREE_CODE (rhs), TREE_TYPE (decl), rhs0, rhs1)); } else incr = RECUR (incr); break; default: incr = RECUR (incr); break; } if (orig_declv && !OMP_FOR_ORIG_DECLS (t)) TREE_VEC_ELT (orig_declv, i) = decl; TREE_VEC_ELT (declv, i) = decl; TREE_VEC_ELT (initv, i) = init; TREE_VEC_ELT (condv, i) = cond; TREE_VEC_ELT (incrv, i) = incr; return false; #undef RECUR } /* Helper function of tsubst_expr, find OMP_TEAMS inside of OMP_TARGET's body. */ static tree tsubst_find_omp_teams (tree *tp, int *walk_subtrees, void *) { *walk_subtrees = 0; switch (TREE_CODE (*tp)) { case OMP_TEAMS: return *tp; case BIND_EXPR: case STATEMENT_LIST: *walk_subtrees = 1; break; default: break; } return NULL_TREE; } /* Helper function for tsubst_expr. For decomposition declaration artificial base DECL, which is tsubsted PATTERN_DECL, tsubst also the corresponding decls representing the identifiers of the decomposition declaration. Return DECL if successful or error_mark_node otherwise, set *FIRST to the first decl in the list chained through DECL_CHAIN and *CNT to the number of such decls. 
*/ static tree tsubst_decomp_names (tree decl, tree pattern_decl, tree args, tsubst_flags_t complain, tree in_decl, tree *first, unsigned int *cnt) { tree decl2, decl3, prev = decl; *cnt = 0; gcc_assert (DECL_NAME (decl) == NULL_TREE); for (decl2 = DECL_CHAIN (pattern_decl); decl2 && VAR_P (decl2) && DECL_DECOMPOSITION_P (decl2) && DECL_NAME (decl2); decl2 = DECL_CHAIN (decl2)) { if (TREE_TYPE (decl2) == error_mark_node && *cnt == 0) { gcc_assert (errorcount); return error_mark_node; } (*cnt)++; gcc_assert (DECL_DECOMP_BASE (decl2) == pattern_decl); gcc_assert (DECL_HAS_VALUE_EXPR_P (decl2)); tree v = DECL_VALUE_EXPR (decl2); DECL_HAS_VALUE_EXPR_P (decl2) = 0; SET_DECL_VALUE_EXPR (decl2, NULL_TREE); decl3 = tsubst (decl2, args, complain, in_decl); SET_DECL_VALUE_EXPR (decl2, v); DECL_HAS_VALUE_EXPR_P (decl2) = 1; if (VAR_P (decl3)) DECL_TEMPLATE_INSTANTIATED (decl3) = 1; else { gcc_assert (errorcount); decl = error_mark_node; continue; } maybe_push_decl (decl3); if (error_operand_p (decl3)) decl = error_mark_node; else if (decl != error_mark_node && DECL_CHAIN (decl3) != prev && decl != prev) { gcc_assert (errorcount); decl = error_mark_node; } else prev = decl3; } *first = prev; return decl; } /* Return the proper local_specialization for init-capture pack DECL. */ static tree lookup_init_capture_pack (tree decl) { /* We handle normal pack captures by forwarding to the specialization of the captured parameter. We can't do that for pack init-captures; we need them to have their own local_specialization. We created the individual VAR_DECLs (if any) under build_capture_proxy, and we need to collect them when we process the DECL_EXPR for the pack init-capture in the template. So, how do we find them? We don't know the capture proxy pack when building the individual resulting proxies, and we don't know the individual proxies when instantiating the pack. What we have in common is the FIELD_DECL. So...when we instantiate the FIELD_DECL, we stick the result in local_specializations. Then at the DECL_EXPR we look up that result, see how many elements it has, synthesize the names, and look them up. */ tree cname = DECL_NAME (decl); tree val = DECL_VALUE_EXPR (decl); tree field = TREE_OPERAND (val, 1); gcc_assert (TREE_CODE (field) == FIELD_DECL); tree fpack = retrieve_local_specialization (field); if (fpack == error_mark_node) return error_mark_node; int len = 1; tree vec = NULL_TREE; tree r = NULL_TREE; if (TREE_CODE (fpack) == TREE_VEC) { len = TREE_VEC_LENGTH (fpack); vec = make_tree_vec (len); r = make_node (NONTYPE_ARGUMENT_PACK); SET_ARGUMENT_PACK_ARGS (r, vec); } for (int i = 0; i < len; ++i) { tree ename = vec ? make_ith_pack_parameter_name (cname, i) : cname; tree elt = lookup_name_real (ename, 0, 0, true, 0, LOOKUP_NORMAL); if (vec) TREE_VEC_ELT (vec, i) = elt; else r = elt; } return r; } /* Like tsubst_copy for expressions, etc. but also does semantic processing. 
*/ tree tsubst_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl, bool integral_constant_expression_p) { #define RETURN(EXP) do { r = (EXP); goto out; } while(0) #define RECUR(NODE) \ tsubst_expr ((NODE), args, complain, in_decl, \ integral_constant_expression_p) tree stmt, tmp; tree r; location_t loc; if (t == NULL_TREE || t == error_mark_node) return t; loc = input_location; if (location_t eloc = cp_expr_location (t)) input_location = eloc; if (STATEMENT_CODE_P (TREE_CODE (t))) current_stmt_tree ()->stmts_are_full_exprs_p = STMT_IS_FULL_EXPR_P (t); switch (TREE_CODE (t)) { case STATEMENT_LIST: { tree_stmt_iterator i; for (i = tsi_start (t); !tsi_end_p (i); tsi_next (&i)) RECUR (tsi_stmt (i)); break; } case CTOR_INITIALIZER: finish_mem_initializers (tsubst_initializer_list (TREE_OPERAND (t, 0), args)); break; case RETURN_EXPR: finish_return_stmt (RECUR (TREE_OPERAND (t, 0))); break; case CO_RETURN_EXPR: finish_co_return_stmt (input_location, RECUR (TREE_OPERAND (t, 0))); break; case CO_YIELD_EXPR: stmt = finish_co_yield_expr (input_location, RECUR (TREE_OPERAND (t, 0))); RETURN (stmt); break; case CO_AWAIT_EXPR: stmt = finish_co_await_expr (input_location, RECUR (TREE_OPERAND (t, 0))); RETURN (stmt); break; case EXPR_STMT: tmp = RECUR (EXPR_STMT_EXPR (t)); if (EXPR_STMT_STMT_EXPR_RESULT (t)) finish_stmt_expr_expr (tmp, cur_stmt_expr); else finish_expr_stmt (tmp); break; case USING_STMT: finish_using_directive (USING_STMT_NAMESPACE (t), /*attribs=*/NULL_TREE); break; case DECL_EXPR: { tree decl, pattern_decl; tree init; pattern_decl = decl = DECL_EXPR_DECL (t); if (TREE_CODE (decl) == LABEL_DECL) finish_label_decl (DECL_NAME (decl)); else if (TREE_CODE (decl) == USING_DECL) { tree scope = USING_DECL_SCOPE (decl); tree name = DECL_NAME (decl); scope = tsubst (scope, args, complain, in_decl); finish_nonmember_using_decl (scope, name); } else if (is_capture_proxy (decl) && !DECL_TEMPLATE_INSTANTIATION (current_function_decl)) { /* We're in tsubst_lambda_expr, we've already inserted a new capture proxy, so look it up and register it. */ tree inst; if (!DECL_PACK_P (decl)) { inst = lookup_name_real (DECL_NAME (decl), /*prefer_type*/0, /*nonclass*/1, /*block_p=*/true, /*ns_only*/0, LOOKUP_HIDDEN); gcc_assert (inst != decl && is_capture_proxy (inst)); } else if (is_normal_capture_proxy (decl)) { inst = (retrieve_local_specialization (DECL_CAPTURED_VARIABLE (decl))); gcc_assert (TREE_CODE (inst) == NONTYPE_ARGUMENT_PACK || DECL_PACK_P (inst)); } else inst = lookup_init_capture_pack (decl); register_local_specialization (inst, decl); break; } else if (DECL_PRETTY_FUNCTION_P (decl)) decl = make_fname_decl (DECL_SOURCE_LOCATION (decl), DECL_NAME (decl), true/*DECL_PRETTY_FUNCTION_P (decl)*/); else if (DECL_IMPLICIT_TYPEDEF_P (decl) && LAMBDA_TYPE_P (TREE_TYPE (decl))) /* Don't copy the old closure; we'll create a new one in tsubst_lambda_expr. */ break; else { init = DECL_INITIAL (decl); /* The following tsubst call will clear the DECL_TEMPLATE_INFO for local variables, so save if DECL was declared constinit. */ const bool constinit_p = (VAR_P (decl) && DECL_LANG_SPECIFIC (decl) && DECL_TEMPLATE_INFO (decl) && TINFO_VAR_DECLARED_CONSTINIT (DECL_TEMPLATE_INFO (decl))); decl = tsubst (decl, args, complain, in_decl); if (decl != error_mark_node) { /* By marking the declaration as instantiated, we avoid trying to instantiate it. Since instantiate_decl can't handle local variables, and since we've already done all that needs to be done, that's the right thing to do. 
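   (For example -- hypothetically -- for a block-scope variable in a
   function template such as

     template <typename T>
     void fn ()
     {
       T local = T ();
     }

   "local" is fully substituted and finished right here, so nothing is
   left for instantiate_decl to do.)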
*/ if (VAR_P (decl)) DECL_TEMPLATE_INSTANTIATED (decl) = 1; if (VAR_P (decl) && !DECL_NAME (decl) && ANON_AGGR_TYPE_P (TREE_TYPE (decl))) /* Anonymous aggregates are a special case. */ finish_anon_union (decl); else if (is_capture_proxy (DECL_EXPR_DECL (t))) { DECL_CONTEXT (decl) = current_function_decl; if (DECL_NAME (decl) == this_identifier) { tree lam = DECL_CONTEXT (current_function_decl); lam = CLASSTYPE_LAMBDA_EXPR (lam); LAMBDA_EXPR_THIS_CAPTURE (lam) = decl; } insert_capture_proxy (decl); } else if (DECL_IMPLICIT_TYPEDEF_P (t)) /* We already did a pushtag. */; else if (TREE_CODE (decl) == FUNCTION_DECL && DECL_OMP_DECLARE_REDUCTION_P (decl) && DECL_FUNCTION_SCOPE_P (pattern_decl)) { DECL_CONTEXT (decl) = NULL_TREE; pushdecl (decl); DECL_CONTEXT (decl) = current_function_decl; cp_check_omp_declare_reduction (decl); } else { bool const_init = false; unsigned int cnt = 0; tree first = NULL_TREE, ndecl = error_mark_node; tree asmspec_tree = NULL_TREE; maybe_push_decl (decl); if (VAR_P (decl) && DECL_DECOMPOSITION_P (decl) && TREE_TYPE (pattern_decl) != error_mark_node) ndecl = tsubst_decomp_names (decl, pattern_decl, args, complain, in_decl, &first, &cnt); init = tsubst_init (init, decl, args, complain, in_decl); if (VAR_P (decl)) const_init = (DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (pattern_decl)); if (ndecl != error_mark_node) cp_maybe_mangle_decomp (ndecl, first, cnt); if (VAR_P (decl) && DECL_HARD_REGISTER (pattern_decl)) { tree id = DECL_ASSEMBLER_NAME (pattern_decl); const char *asmspec = IDENTIFIER_POINTER (id); gcc_assert (asmspec[0] == '*'); asmspec_tree = build_string (IDENTIFIER_LENGTH (id) - 1, asmspec + 1); TREE_TYPE (asmspec_tree) = char_array_type_node; } cp_finish_decl (decl, init, const_init, asmspec_tree, constinit_p ? LOOKUP_CONSTINIT : 0); if (ndecl != error_mark_node) cp_finish_decomp (ndecl, first, cnt); } } } break; } case FOR_STMT: stmt = begin_for_stmt (NULL_TREE, NULL_TREE); RECUR (FOR_INIT_STMT (t)); finish_init_stmt (stmt); tmp = RECUR (FOR_COND (t)); finish_for_cond (tmp, stmt, false, 0); tmp = RECUR (FOR_EXPR (t)); finish_for_expr (tmp, stmt); { bool prev = note_iteration_stmt_body_start (); RECUR (FOR_BODY (t)); note_iteration_stmt_body_end (prev); } finish_for_stmt (stmt); break; case RANGE_FOR_STMT: { /* Construct another range_for, if this is not a final substitution (for inside a generic lambda of a template). Otherwise convert to a regular for. */ tree decl, expr; stmt = (processing_template_decl ? begin_range_for_stmt (NULL_TREE, NULL_TREE) : begin_for_stmt (NULL_TREE, NULL_TREE)); RECUR (RANGE_FOR_INIT_STMT (t)); decl = RANGE_FOR_DECL (t); decl = tsubst (decl, args, complain, in_decl); maybe_push_decl (decl); expr = RECUR (RANGE_FOR_EXPR (t)); tree decomp_first = NULL_TREE; unsigned decomp_cnt = 0; if (VAR_P (decl) && DECL_DECOMPOSITION_P (decl)) decl = tsubst_decomp_names (decl, RANGE_FOR_DECL (t), args, complain, in_decl, &decomp_first, &decomp_cnt); if (processing_template_decl) { RANGE_FOR_IVDEP (stmt) = RANGE_FOR_IVDEP (t); RANGE_FOR_UNROLL (stmt) = RANGE_FOR_UNROLL (t); finish_range_for_decl (stmt, decl, expr); if (decomp_first && decl != error_mark_node) cp_finish_decomp (decl, decomp_first, decomp_cnt); } else { unsigned short unroll = (RANGE_FOR_UNROLL (t) ? 
tree_to_uhwi (RANGE_FOR_UNROLL (t)) : 0); stmt = cp_convert_range_for (stmt, decl, expr, decomp_first, decomp_cnt, RANGE_FOR_IVDEP (t), unroll); } bool prev = note_iteration_stmt_body_start (); RECUR (RANGE_FOR_BODY (t)); note_iteration_stmt_body_end (prev); finish_for_stmt (stmt); } break; case WHILE_STMT: stmt = begin_while_stmt (); tmp = RECUR (WHILE_COND (t)); finish_while_stmt_cond (tmp, stmt, false, 0); { bool prev = note_iteration_stmt_body_start (); RECUR (WHILE_BODY (t)); note_iteration_stmt_body_end (prev); } finish_while_stmt (stmt); break; case DO_STMT: stmt = begin_do_stmt (); { bool prev = note_iteration_stmt_body_start (); RECUR (DO_BODY (t)); note_iteration_stmt_body_end (prev); } finish_do_body (stmt); tmp = RECUR (DO_COND (t)); finish_do_stmt (tmp, stmt, false, 0); break; case IF_STMT: stmt = begin_if_stmt (); IF_STMT_CONSTEXPR_P (stmt) = IF_STMT_CONSTEXPR_P (t); if (IF_STMT_CONSTEXPR_P (t)) args = add_extra_args (IF_STMT_EXTRA_ARGS (t), args); tmp = RECUR (IF_COND (t)); tmp = finish_if_stmt_cond (tmp, stmt); if (IF_STMT_CONSTEXPR_P (t) && instantiation_dependent_expression_p (tmp)) { /* We're partially instantiating a generic lambda, but the condition of the constexpr if is still dependent. Don't substitute into the branches now, just remember the template arguments. */ do_poplevel (IF_SCOPE (stmt)); IF_COND (stmt) = IF_COND (t); THEN_CLAUSE (stmt) = THEN_CLAUSE (t); ELSE_CLAUSE (stmt) = ELSE_CLAUSE (t); IF_STMT_EXTRA_ARGS (stmt) = build_extra_args (t, args, complain); add_stmt (stmt); break; } if (IF_STMT_CONSTEXPR_P (t) && integer_zerop (tmp)) /* Don't instantiate the THEN_CLAUSE. */; else { tree folded = fold_non_dependent_expr (tmp, complain); bool inhibit = integer_zerop (folded); if (inhibit) ++c_inhibit_evaluation_warnings; RECUR (THEN_CLAUSE (t)); if (inhibit) --c_inhibit_evaluation_warnings; } finish_then_clause (stmt); if (IF_STMT_CONSTEXPR_P (t) && integer_nonzerop (tmp)) /* Don't instantiate the ELSE_CLAUSE. */; else if (ELSE_CLAUSE (t)) { tree folded = fold_non_dependent_expr (tmp, complain); bool inhibit = integer_nonzerop (folded); begin_else_clause (stmt); if (inhibit) ++c_inhibit_evaluation_warnings; RECUR (ELSE_CLAUSE (t)); if (inhibit) --c_inhibit_evaluation_warnings; finish_else_clause (stmt); } finish_if_stmt (stmt); break; case BIND_EXPR: if (BIND_EXPR_BODY_BLOCK (t)) stmt = begin_function_body (); else stmt = begin_compound_stmt (BIND_EXPR_TRY_BLOCK (t) ? 
BCS_TRY_BLOCK : 0); RECUR (BIND_EXPR_BODY (t)); if (BIND_EXPR_BODY_BLOCK (t)) finish_function_body (stmt); else finish_compound_stmt (stmt); break; case BREAK_STMT: finish_break_stmt (); break; case CONTINUE_STMT: finish_continue_stmt (); break; case SWITCH_STMT: stmt = begin_switch_stmt (); tmp = RECUR (SWITCH_STMT_COND (t)); finish_switch_cond (tmp, stmt); RECUR (SWITCH_STMT_BODY (t)); finish_switch_stmt (stmt); break; case CASE_LABEL_EXPR: { tree decl = CASE_LABEL (t); tree low = RECUR (CASE_LOW (t)); tree high = RECUR (CASE_HIGH (t)); tree l = finish_case_label (EXPR_LOCATION (t), low, high); if (l && TREE_CODE (l) == CASE_LABEL_EXPR) { tree label = CASE_LABEL (l); FALLTHROUGH_LABEL_P (label) = FALLTHROUGH_LABEL_P (decl); if (DECL_ATTRIBUTES (decl) != NULL_TREE) cplus_decl_attributes (&label, DECL_ATTRIBUTES (decl), 0); } } break; case LABEL_EXPR: { tree decl = LABEL_EXPR_LABEL (t); tree label; label = finish_label_stmt (DECL_NAME (decl)); if (TREE_CODE (label) == LABEL_DECL) FALLTHROUGH_LABEL_P (label) = FALLTHROUGH_LABEL_P (decl); if (DECL_ATTRIBUTES (decl) != NULL_TREE) cplus_decl_attributes (&label, DECL_ATTRIBUTES (decl), 0); } break; case GOTO_EXPR: tmp = GOTO_DESTINATION (t); if (TREE_CODE (tmp) != LABEL_DECL) /* Computed goto's must be tsubst'd into. On the other hand, non-computed gotos must not be; the identifier in question will have no binding. */ tmp = RECUR (tmp); else tmp = DECL_NAME (tmp); finish_goto_stmt (tmp); break; case ASM_EXPR: { tree string = RECUR (ASM_STRING (t)); tree outputs = tsubst_copy_asm_operands (ASM_OUTPUTS (t), args, complain, in_decl); tree inputs = tsubst_copy_asm_operands (ASM_INPUTS (t), args, complain, in_decl); tree clobbers = tsubst_copy_asm_operands (ASM_CLOBBERS (t), args, complain, in_decl); tree labels = tsubst_copy_asm_operands (ASM_LABELS (t), args, complain, in_decl); tmp = finish_asm_stmt (EXPR_LOCATION (t), ASM_VOLATILE_P (t), string, outputs, inputs, clobbers, labels, ASM_INLINE_P (t)); tree asm_expr = tmp; if (TREE_CODE (asm_expr) == CLEANUP_POINT_EXPR) asm_expr = TREE_OPERAND (asm_expr, 0); ASM_INPUT_P (asm_expr) = ASM_INPUT_P (t); } break; case TRY_BLOCK: if (CLEANUP_P (t)) { stmt = begin_try_block (); RECUR (TRY_STMTS (t)); finish_cleanup_try_block (stmt); finish_cleanup (RECUR (TRY_HANDLERS (t)), stmt); } else { tree compound_stmt = NULL_TREE; if (FN_TRY_BLOCK_P (t)) stmt = begin_function_try_block (&compound_stmt); else stmt = begin_try_block (); RECUR (TRY_STMTS (t)); if (FN_TRY_BLOCK_P (t)) finish_function_try_block (stmt); else finish_try_block (stmt); RECUR (TRY_HANDLERS (t)); if (FN_TRY_BLOCK_P (t)) finish_function_handler_sequence (stmt, compound_stmt); else finish_handler_sequence (stmt); } break; case HANDLER: { tree decl = HANDLER_PARMS (t); if (decl) { decl = tsubst (decl, args, complain, in_decl); /* Prevent instantiate_decl from trying to instantiate this variable. We've already done all that needs to be done. */ if (decl != error_mark_node) DECL_TEMPLATE_INSTANTIATED (decl) = 1; } stmt = begin_handler (); finish_handler_parms (decl, stmt); RECUR (HANDLER_BODY (t)); finish_handler (stmt); } break; case TAG_DEFN: tmp = tsubst (TREE_TYPE (t), args, complain, NULL_TREE); if (CLASS_TYPE_P (tmp)) { /* Local classes are not independent templates; they are instantiated along with their containing function. And this way we don't have to deal with pushing out of one local class to instantiate a member of another local class. */ /* Closures are handled by the LAMBDA_EXPR. 
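   A purely illustrative, hypothetical example of the local classes
   handled below:

     template <typename T>
     void fn ()
     {
       struct Local
       {
         static T make () { return T (); }
       };
       Local::make ();
     }

   Local::make is instantiated together with fn, by the loop over
   TYPE_FIELDS below.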
*/ gcc_assert (!LAMBDA_TYPE_P (TREE_TYPE (t))); complete_type (tmp); for (tree fld = TYPE_FIELDS (tmp); fld; fld = DECL_CHAIN (fld)) if ((VAR_P (fld) || (TREE_CODE (fld) == FUNCTION_DECL && !DECL_ARTIFICIAL (fld))) && DECL_TEMPLATE_INSTANTIATION (fld)) instantiate_decl (fld, /*defer_ok=*/false, /*expl_inst_class=*/false); } break; case STATIC_ASSERT: { tree condition; ++c_inhibit_evaluation_warnings; condition = tsubst_expr (STATIC_ASSERT_CONDITION (t), args, complain, in_decl, /*integral_constant_expression_p=*/true); --c_inhibit_evaluation_warnings; finish_static_assert (condition, STATIC_ASSERT_MESSAGE (t), STATIC_ASSERT_SOURCE_LOCATION (t), /*member_p=*/false); } break; case OACC_KERNELS: case OACC_PARALLEL: case OACC_SERIAL: tmp = tsubst_omp_clauses (OMP_CLAUSES (t), C_ORT_ACC, args, complain, in_decl); stmt = begin_omp_parallel (); RECUR (OMP_BODY (t)); finish_omp_construct (TREE_CODE (t), stmt, tmp); break; case OMP_PARALLEL: r = push_omp_privatization_clauses (OMP_PARALLEL_COMBINED (t)); tmp = tsubst_omp_clauses (OMP_PARALLEL_CLAUSES (t), C_ORT_OMP, args, complain, in_decl); if (OMP_PARALLEL_COMBINED (t)) omp_parallel_combined_clauses = &tmp; stmt = begin_omp_parallel (); RECUR (OMP_PARALLEL_BODY (t)); gcc_assert (omp_parallel_combined_clauses == NULL); OMP_PARALLEL_COMBINED (finish_omp_parallel (tmp, stmt)) = OMP_PARALLEL_COMBINED (t); pop_omp_privatization_clauses (r); break; case OMP_TASK: if (OMP_TASK_BODY (t) == NULL_TREE) { tmp = tsubst_omp_clauses (OMP_TASK_CLAUSES (t), C_ORT_OMP, args, complain, in_decl); t = copy_node (t); OMP_TASK_CLAUSES (t) = tmp; add_stmt (t); break; } r = push_omp_privatization_clauses (false); tmp = tsubst_omp_clauses (OMP_TASK_CLAUSES (t), C_ORT_OMP, args, complain, in_decl); stmt = begin_omp_task (); RECUR (OMP_TASK_BODY (t)); finish_omp_task (tmp, stmt); pop_omp_privatization_clauses (r); break; case OMP_FOR: case OMP_LOOP: case OMP_SIMD: case OMP_DISTRIBUTE: case OMP_TASKLOOP: case OACC_LOOP: { tree clauses, body, pre_body; tree declv = NULL_TREE, initv = NULL_TREE, condv = NULL_TREE; tree orig_declv = NULL_TREE; tree incrv = NULL_TREE; enum c_omp_region_type ort = C_ORT_OMP; bool any_range_for = false; int i; if (TREE_CODE (t) == OACC_LOOP) ort = C_ORT_ACC; r = push_omp_privatization_clauses (OMP_FOR_INIT (t) == NULL_TREE); clauses = tsubst_omp_clauses (OMP_FOR_CLAUSES (t), ort, args, complain, in_decl); if (OMP_FOR_INIT (t) != NULL_TREE) { declv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t))); if (OMP_FOR_ORIG_DECLS (t)) orig_declv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t))); initv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t))); condv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t))); incrv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t))); } keep_next_level (true); stmt = begin_omp_structured_block (); pre_body = push_stmt_list (); RECUR (OMP_FOR_PRE_BODY (t)); pre_body = pop_stmt_list (pre_body); if (OMP_FOR_INIT (t) != NULL_TREE) for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (t)); i++) any_range_for |= tsubst_omp_for_iterator (t, i, declv, orig_declv, initv, condv, incrv, &clauses, args, complain, in_decl, integral_constant_expression_p); omp_parallel_combined_clauses = NULL; if (any_range_for) { gcc_assert (orig_declv); body = begin_omp_structured_block (); for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (t)); i++) if (TREE_VEC_ELT (orig_declv, i) != TREE_VEC_ELT (declv, i) && TREE_CODE (TREE_VEC_ELT (orig_declv, i)) == TREE_LIST && TREE_CHAIN (TREE_VEC_ELT (orig_declv, i))) cp_finish_omp_range_for (TREE_VEC_ELT 
(orig_declv, i), TREE_VEC_ELT (declv, i)); } else body = push_stmt_list (); RECUR (OMP_FOR_BODY (t)); if (any_range_for) body = finish_omp_structured_block (body); else body = pop_stmt_list (body); if (OMP_FOR_INIT (t) != NULL_TREE) t = finish_omp_for (EXPR_LOCATION (t), TREE_CODE (t), declv, orig_declv, initv, condv, incrv, body, pre_body, NULL, clauses); else { t = make_node (TREE_CODE (t)); TREE_TYPE (t) = void_type_node; OMP_FOR_BODY (t) = body; OMP_FOR_PRE_BODY (t) = pre_body; OMP_FOR_CLAUSES (t) = clauses; SET_EXPR_LOCATION (t, EXPR_LOCATION (t)); add_stmt (t); } add_stmt (finish_omp_for_block (finish_omp_structured_block (stmt), t)); pop_omp_privatization_clauses (r); } break; case OMP_SECTIONS: omp_parallel_combined_clauses = NULL; /* FALLTHRU */ case OMP_SINGLE: case OMP_TEAMS: case OMP_CRITICAL: case OMP_TASKGROUP: case OMP_SCAN: r = push_omp_privatization_clauses (TREE_CODE (t) == OMP_TEAMS && OMP_TEAMS_COMBINED (t)); tmp = tsubst_omp_clauses (OMP_CLAUSES (t), C_ORT_OMP, args, complain, in_decl); if (TREE_CODE (t) == OMP_TEAMS) { keep_next_level (true); stmt = begin_omp_structured_block (); RECUR (OMP_BODY (t)); stmt = finish_omp_structured_block (stmt); } else { stmt = push_stmt_list (); RECUR (OMP_BODY (t)); stmt = pop_stmt_list (stmt); } t = copy_node (t); OMP_BODY (t) = stmt; OMP_CLAUSES (t) = tmp; add_stmt (t); pop_omp_privatization_clauses (r); break; case OMP_DEPOBJ: r = RECUR (OMP_DEPOBJ_DEPOBJ (t)); if (OMP_DEPOBJ_CLAUSES (t) && OMP_DEPOBJ_CLAUSES (t) != error_mark_node) { enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_SOURCE; if (TREE_CODE (OMP_DEPOBJ_CLAUSES (t)) == OMP_CLAUSE) { tmp = tsubst_omp_clauses (OMP_DEPOBJ_CLAUSES (t), C_ORT_OMP, args, complain, in_decl); if (tmp == NULL_TREE) tmp = error_mark_node; } else { kind = (enum omp_clause_depend_kind) tree_to_uhwi (OMP_DEPOBJ_CLAUSES (t)); tmp = NULL_TREE; } finish_omp_depobj (EXPR_LOCATION (t), r, kind, tmp); } else finish_omp_depobj (EXPR_LOCATION (t), r, OMP_CLAUSE_DEPEND_SOURCE, OMP_DEPOBJ_CLAUSES (t)); break; case OACC_DATA: case OMP_TARGET_DATA: case OMP_TARGET: tmp = tsubst_omp_clauses (OMP_CLAUSES (t), (TREE_CODE (t) == OACC_DATA) ? C_ORT_ACC : C_ORT_OMP, args, complain, in_decl); keep_next_level (true); stmt = begin_omp_structured_block (); RECUR (OMP_BODY (t)); stmt = finish_omp_structured_block (stmt); t = copy_node (t); OMP_BODY (t) = stmt; OMP_CLAUSES (t) = tmp; if (TREE_CODE (t) == OMP_TARGET && OMP_TARGET_COMBINED (t)) { tree teams = cp_walk_tree (&stmt, tsubst_find_omp_teams, NULL, NULL); if (teams) { /* For combined target teams, ensure the num_teams and thread_limit clause expressions are evaluated on the host, before entering the target construct. 
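   A purely illustrative, hypothetical example (compute_teams is an
   assumed user function, not something defined in GCC):

     template <int N>
     void offload ()
     {
     #pragma omp target teams num_teams (compute_teams (N))
       ;
     }

   The call compute_teams (N) has to run before the target region is
   entered; the temporary and firstprivate clause built below arrange
   exactly that.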
*/ tree c; for (c = OMP_TEAMS_CLAUSES (teams); c; c = OMP_CLAUSE_CHAIN (c)) if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_NUM_TEAMS || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_THREAD_LIMIT) && TREE_CODE (OMP_CLAUSE_OPERAND (c, 0)) != INTEGER_CST) { tree expr = OMP_CLAUSE_OPERAND (c, 0); expr = force_target_expr (TREE_TYPE (expr), expr, tf_none); if (expr == error_mark_node) continue; tmp = TARGET_EXPR_SLOT (expr); add_stmt (expr); OMP_CLAUSE_OPERAND (c, 0) = expr; tree tc = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (tc) = tmp; OMP_CLAUSE_CHAIN (tc) = OMP_TARGET_CLAUSES (t); OMP_TARGET_CLAUSES (t) = tc; } } } add_stmt (t); break; case OACC_DECLARE: t = copy_node (t); tmp = tsubst_omp_clauses (OACC_DECLARE_CLAUSES (t), C_ORT_ACC, args, complain, in_decl); OACC_DECLARE_CLAUSES (t) = tmp; add_stmt (t); break; case OMP_TARGET_UPDATE: case OMP_TARGET_ENTER_DATA: case OMP_TARGET_EXIT_DATA: tmp = tsubst_omp_clauses (OMP_STANDALONE_CLAUSES (t), C_ORT_OMP, args, complain, in_decl); t = copy_node (t); OMP_STANDALONE_CLAUSES (t) = tmp; add_stmt (t); break; case OACC_CACHE: case OACC_ENTER_DATA: case OACC_EXIT_DATA: case OACC_UPDATE: tmp = tsubst_omp_clauses (OMP_STANDALONE_CLAUSES (t), C_ORT_ACC, args, complain, in_decl); t = copy_node (t); OMP_STANDALONE_CLAUSES (t) = tmp; add_stmt (t); break; case OMP_ORDERED: tmp = tsubst_omp_clauses (OMP_ORDERED_CLAUSES (t), C_ORT_OMP, args, complain, in_decl); stmt = push_stmt_list (); RECUR (OMP_BODY (t)); stmt = pop_stmt_list (stmt); t = copy_node (t); OMP_BODY (t) = stmt; OMP_ORDERED_CLAUSES (t) = tmp; add_stmt (t); break; case OMP_MASTER: omp_parallel_combined_clauses = NULL; /* FALLTHRU */ case OMP_SECTION: stmt = push_stmt_list (); RECUR (OMP_BODY (t)); stmt = pop_stmt_list (stmt); t = copy_node (t); OMP_BODY (t) = stmt; add_stmt (t); break; case OMP_ATOMIC: gcc_assert (OMP_ATOMIC_DEPENDENT_P (t)); tmp = NULL_TREE; if (TREE_CODE (TREE_OPERAND (t, 0)) == OMP_CLAUSE) tmp = tsubst_omp_clauses (TREE_OPERAND (t, 0), C_ORT_OMP, args, complain, in_decl); if (TREE_CODE (TREE_OPERAND (t, 1)) != MODIFY_EXPR) { tree op1 = TREE_OPERAND (t, 1); tree rhs1 = NULL_TREE; tree lhs, rhs; if (TREE_CODE (op1) == COMPOUND_EXPR) { rhs1 = RECUR (TREE_OPERAND (op1, 0)); op1 = TREE_OPERAND (op1, 1); } lhs = RECUR (TREE_OPERAND (op1, 0)); rhs = RECUR (TREE_OPERAND (op1, 1)); finish_omp_atomic (EXPR_LOCATION (t), OMP_ATOMIC, TREE_CODE (op1), lhs, rhs, NULL_TREE, NULL_TREE, rhs1, tmp, OMP_ATOMIC_MEMORY_ORDER (t)); } else { tree op1 = TREE_OPERAND (t, 1); tree v = NULL_TREE, lhs, rhs = NULL_TREE, lhs1 = NULL_TREE; tree rhs1 = NULL_TREE; enum tree_code code = TREE_CODE (TREE_OPERAND (op1, 1)); enum tree_code opcode = NOP_EXPR; if (code == OMP_ATOMIC_READ) { v = RECUR (TREE_OPERAND (op1, 0)); lhs = RECUR (TREE_OPERAND (TREE_OPERAND (op1, 1), 0)); } else if (code == OMP_ATOMIC_CAPTURE_OLD || code == OMP_ATOMIC_CAPTURE_NEW) { tree op11 = TREE_OPERAND (TREE_OPERAND (op1, 1), 1); v = RECUR (TREE_OPERAND (op1, 0)); lhs1 = RECUR (TREE_OPERAND (TREE_OPERAND (op1, 1), 0)); if (TREE_CODE (op11) == COMPOUND_EXPR) { rhs1 = RECUR (TREE_OPERAND (op11, 0)); op11 = TREE_OPERAND (op11, 1); } lhs = RECUR (TREE_OPERAND (op11, 0)); rhs = RECUR (TREE_OPERAND (op11, 1)); opcode = TREE_CODE (op11); if (opcode == MODIFY_EXPR) opcode = NOP_EXPR; } else { code = OMP_ATOMIC; lhs = RECUR (TREE_OPERAND (op1, 0)); rhs = RECUR (TREE_OPERAND (op1, 1)); } finish_omp_atomic (EXPR_LOCATION (t), code, opcode, lhs, rhs, v, lhs1, rhs1, tmp, OMP_ATOMIC_MEMORY_ORDER (t)); } break; case 
TRANSACTION_EXPR: { int flags = 0; flags |= (TRANSACTION_EXPR_OUTER (t) ? TM_STMT_ATTR_OUTER : 0); flags |= (TRANSACTION_EXPR_RELAXED (t) ? TM_STMT_ATTR_RELAXED : 0); if (TRANSACTION_EXPR_IS_STMT (t)) { tree body = TRANSACTION_EXPR_BODY (t); tree noex = NULL_TREE; if (TREE_CODE (body) == MUST_NOT_THROW_EXPR) { noex = MUST_NOT_THROW_COND (body); if (noex == NULL_TREE) noex = boolean_true_node; body = TREE_OPERAND (body, 0); } stmt = begin_transaction_stmt (input_location, NULL, flags); RECUR (body); finish_transaction_stmt (stmt, NULL, flags, RECUR (noex)); } else { stmt = build_transaction_expr (EXPR_LOCATION (t), RECUR (TRANSACTION_EXPR_BODY (t)), flags, NULL_TREE); RETURN (stmt); } } break; case MUST_NOT_THROW_EXPR: { tree op0 = RECUR (TREE_OPERAND (t, 0)); tree cond = RECUR (MUST_NOT_THROW_COND (t)); RETURN (build_must_not_throw_expr (op0, cond)); } case EXPR_PACK_EXPANSION: error ("invalid use of pack expansion expression"); RETURN (error_mark_node); case NONTYPE_ARGUMENT_PACK: error ("use %<...%> to expand argument pack"); RETURN (error_mark_node); case COMPOUND_EXPR: tmp = RECUR (TREE_OPERAND (t, 0)); if (tmp == NULL_TREE) /* If the first operand was a statement, we're done with it. */ RETURN (RECUR (TREE_OPERAND (t, 1))); RETURN (build_x_compound_expr (EXPR_LOCATION (t), tmp, RECUR (TREE_OPERAND (t, 1)), complain)); case ANNOTATE_EXPR: tmp = RECUR (TREE_OPERAND (t, 0)); RETURN (build3_loc (EXPR_LOCATION (t), ANNOTATE_EXPR, TREE_TYPE (tmp), tmp, RECUR (TREE_OPERAND (t, 1)), RECUR (TREE_OPERAND (t, 2)))); case PREDICT_EXPR: RETURN (add_stmt (copy_node (t))); default: gcc_assert (!STATEMENT_CODE_P (TREE_CODE (t))); RETURN (tsubst_copy_and_build (t, args, complain, in_decl, /*function_p=*/false, integral_constant_expression_p)); } RETURN (NULL_TREE); out: input_location = loc; return r; #undef RECUR #undef RETURN } /* Instantiate the special body of the artificial DECL_OMP_DECLARE_REDUCTION function. For description of the body see comment above cp_parser_omp_declare_reduction_exprs. 
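
   A purely illustrative, hypothetical declaration whose artificial
   combiner/initializer function ends up here:

     template <typename T>
     void f ()
     {
     #pragma omp declare reduction (merge : T : omp_out += omp_in) \
       initializer (omp_priv = T ())
     }

   The artificial body is a STATEMENT_LIST holding DECL_EXPRs for
   omp_out/omp_in (and omp_priv/omp_orig when an initializer is present)
   around the combiner and initializer expressions; the stmts[] indexing
   below relies on that layout.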
*/ static void tsubst_omp_udr (tree t, tree args, tsubst_flags_t complain, tree in_decl) { if (t == NULL_TREE || t == error_mark_node) return; gcc_assert (TREE_CODE (t) == STATEMENT_LIST); tree_stmt_iterator tsi; int i; tree stmts[7]; memset (stmts, 0, sizeof stmts); for (i = 0, tsi = tsi_start (t); i < 7 && !tsi_end_p (tsi); i++, tsi_next (&tsi)) stmts[i] = tsi_stmt (tsi); gcc_assert (tsi_end_p (tsi)); if (i >= 3) { gcc_assert (TREE_CODE (stmts[0]) == DECL_EXPR && TREE_CODE (stmts[1]) == DECL_EXPR); tree omp_out = tsubst (DECL_EXPR_DECL (stmts[0]), args, complain, in_decl); tree omp_in = tsubst (DECL_EXPR_DECL (stmts[1]), args, complain, in_decl); DECL_CONTEXT (omp_out) = current_function_decl; DECL_CONTEXT (omp_in) = current_function_decl; keep_next_level (true); tree block = begin_omp_structured_block (); tsubst_expr (stmts[2], args, complain, in_decl, false); block = finish_omp_structured_block (block); block = maybe_cleanup_point_expr_void (block); add_decl_expr (omp_out); if (TREE_NO_WARNING (DECL_EXPR_DECL (stmts[0]))) TREE_NO_WARNING (omp_out) = 1; add_decl_expr (omp_in); finish_expr_stmt (block); } if (i >= 6) { gcc_assert (TREE_CODE (stmts[3]) == DECL_EXPR && TREE_CODE (stmts[4]) == DECL_EXPR); tree omp_priv = tsubst (DECL_EXPR_DECL (stmts[3]), args, complain, in_decl); tree omp_orig = tsubst (DECL_EXPR_DECL (stmts[4]), args, complain, in_decl); DECL_CONTEXT (omp_priv) = current_function_decl; DECL_CONTEXT (omp_orig) = current_function_decl; keep_next_level (true); tree block = begin_omp_structured_block (); tsubst_expr (stmts[5], args, complain, in_decl, false); block = finish_omp_structured_block (block); block = maybe_cleanup_point_expr_void (block); cp_walk_tree (&block, cp_remove_omp_priv_cleanup_stmt, omp_priv, NULL); add_decl_expr (omp_priv); add_decl_expr (omp_orig); finish_expr_stmt (block); if (i == 7) add_decl_expr (omp_orig); } } /* T is a postfix-expression that is not being used in a function call. Return the substituted version of T. */ static tree tsubst_non_call_postfix_expression (tree t, tree args, tsubst_flags_t complain, tree in_decl) { if (TREE_CODE (t) == SCOPE_REF) t = tsubst_qualified_id (t, args, complain, in_decl, /*done=*/false, /*address_p=*/false); else t = tsubst_copy_and_build (t, args, complain, in_decl, /*function_p=*/false, /*integral_constant_expression_p=*/false); return t; } /* Subroutine of tsubst_lambda_expr: add the FIELD/INIT capture pair to the LAMBDA_EXPR_CAPTURE_LIST passed in LIST. Do deduction for a previously dependent init-capture. */ static void prepend_one_capture (tree field, tree init, tree &list, tsubst_flags_t complain) { if (tree auto_node = type_uses_auto (TREE_TYPE (field))) { tree type = NULL_TREE; if (!init) { if (complain & tf_error) error ("empty initializer in lambda init-capture"); init = error_mark_node; } else if (TREE_CODE (init) == TREE_LIST) init = build_x_compound_expr_from_list (init, ELK_INIT, complain); if (!type) type = do_auto_deduction (TREE_TYPE (field), init, auto_node, complain); TREE_TYPE (field) = type; cp_apply_type_quals_to_decl (cp_type_quals (type), field); } list = tree_cons (field, init, list); } /* T is a LAMBDA_EXPR. Generate a new LAMBDA_EXPR for the current instantiation context. Instantiating a pack expansion containing a lambda might result in multiple lambdas all based on the same lambda in the template. 
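
   A purely illustrative, hypothetical example of the pack-expansion case
   mentioned above:

     template <typename... Ts>
     void call_all (Ts... ts)
     {
       ([ts] { return ts; } (), ...);
     }

   Instantiating call_all<int, long> expands the single lambda written in
   the template into two distinct closures, one per pack element.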
*/ tree tsubst_lambda_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl) { tree oldfn = lambda_function (t); in_decl = oldfn; tree r = build_lambda_expr (); LAMBDA_EXPR_LOCATION (r) = LAMBDA_EXPR_LOCATION (t); LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (r) = LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (t); LAMBDA_EXPR_MUTABLE_P (r) = LAMBDA_EXPR_MUTABLE_P (t); LAMBDA_EXPR_INSTANTIATED (r) = true; if (LAMBDA_EXPR_EXTRA_SCOPE (t) == NULL_TREE) /* A lambda in a default argument outside a class gets no LAMBDA_EXPR_EXTRA_SCOPE, as specified by the ABI. But tsubst_default_argument calls start_lambda_scope, so we need to specifically ignore it here, and use the global scope. */ record_null_lambda_scope (r); else record_lambda_scope (r); gcc_assert (LAMBDA_EXPR_THIS_CAPTURE (t) == NULL_TREE && LAMBDA_EXPR_PENDING_PROXIES (t) == NULL); vec<tree,va_gc>* field_packs = NULL; for (tree cap = LAMBDA_EXPR_CAPTURE_LIST (t); cap; cap = TREE_CHAIN (cap)) { tree ofield = TREE_PURPOSE (cap); tree init = TREE_VALUE (cap); if (PACK_EXPANSION_P (init)) init = tsubst_pack_expansion (init, args, complain, in_decl); else init = tsubst_copy_and_build (init, args, complain, in_decl, /*fn*/false, /*constexpr*/false); if (init == error_mark_node) return error_mark_node; if (init && TREE_CODE (init) == TREE_LIST) init = build_x_compound_expr_from_list (init, ELK_INIT, complain); if (!processing_template_decl && init && TREE_CODE (init) != TREE_VEC && variably_modified_type_p (TREE_TYPE (init), NULL_TREE)) { /* For a VLA, simply tsubsting the field type won't work, we need to go through add_capture again. XXX do we want to do this for all captures? */ tree name = (get_identifier (IDENTIFIER_POINTER (DECL_NAME (ofield)) + 2)); tree ftype = TREE_TYPE (ofield); bool by_ref = (TYPE_REF_P (ftype) || (TREE_CODE (ftype) == DECLTYPE_TYPE && DECLTYPE_FOR_REF_CAPTURE (ftype))); add_capture (r, name, init, by_ref, !DECL_NORMAL_CAPTURE_P (ofield)); continue; } if (PACK_EXPANSION_P (ofield)) ofield = PACK_EXPANSION_PATTERN (ofield); tree field = tsubst_decl (ofield, args, complain); if (DECL_PACK_P (ofield) && !DECL_NORMAL_CAPTURE_P (ofield)) { /* Remember these for when we've pushed local_specializations. */ vec_safe_push (field_packs, ofield); vec_safe_push (field_packs, field); } if (field == error_mark_node) return error_mark_node; if (TREE_CODE (field) == TREE_VEC) { int len = TREE_VEC_LENGTH (field); gcc_assert (TREE_CODE (init) == TREE_VEC && TREE_VEC_LENGTH (init) == len); for (int i = 0; i < len; ++i) prepend_one_capture (TREE_VEC_ELT (field, i), TREE_VEC_ELT (init, i), LAMBDA_EXPR_CAPTURE_LIST (r), complain); } else { prepend_one_capture (field, init, LAMBDA_EXPR_CAPTURE_LIST (r), complain); if (id_equal (DECL_NAME (field), "__this")) LAMBDA_EXPR_THIS_CAPTURE (r) = field; } } tree type = begin_lambda_type (r); if (type == error_mark_node) return error_mark_node; /* Do this again now that LAMBDA_EXPR_EXTRA_SCOPE is set. */ determine_visibility (TYPE_NAME (type)); register_capture_members (LAMBDA_EXPR_CAPTURE_LIST (r)); tree oldtmpl = (generic_lambda_fn_p (oldfn) ? DECL_TI_TEMPLATE (oldfn) : NULL_TREE); tree fntype = static_fn_type (oldfn); if (oldtmpl) ++processing_template_decl; fntype = tsubst (fntype, args, complain, in_decl); if (oldtmpl) --processing_template_decl; if (fntype == error_mark_node) r = error_mark_node; else { /* The body of a lambda-expression is not a subexpression of the enclosing expression. Parms are to have DECL_CHAIN tsubsted, which would be skipped if cp_unevaluated_operand. 
*/ cp_evaluated ev; /* Fix the type of 'this'. */ fntype = build_memfn_type (fntype, type, type_memfn_quals (fntype), type_memfn_rqual (fntype)); tree fn, tmpl; if (oldtmpl) { tmpl = tsubst_template_decl (oldtmpl, args, complain, fntype); if (tmpl == error_mark_node) { r = error_mark_node; goto out; } fn = DECL_TEMPLATE_RESULT (tmpl); finish_member_declaration (tmpl); } else { tmpl = NULL_TREE; fn = tsubst_function_decl (oldfn, args, complain, fntype); if (fn == error_mark_node) { r = error_mark_node; goto out; } finish_member_declaration (fn); } if (tree ci = get_constraints (oldfn)) { /* Substitute into the lambda's constraints. */ if (oldtmpl) ++processing_template_decl; ci = tsubst_constraint_info (ci, args, complain, in_decl); if (oldtmpl) --processing_template_decl; set_constraints (fn, ci); } /* Let finish_function set this. */ DECL_DECLARED_CONSTEXPR_P (fn) = false; bool nested = cfun; if (nested) push_function_context (); else /* Still increment function_depth so that we don't GC in the middle of an expression. */ ++function_depth; local_specialization_stack s (lss_copy); tree body = start_lambda_function (fn, r); /* Now record them for lookup_init_capture_pack. */ int fplen = vec_safe_length (field_packs); for (int i = 0; i < fplen; ) { tree pack = (*field_packs)[i++]; tree inst = (*field_packs)[i++]; register_local_specialization (inst, pack); } release_tree_vector (field_packs); register_parameter_specializations (oldfn, fn); if (oldtmpl) { /* We might not partially instantiate some parts of the function, so copy these flags from the original template. */ language_function *ol = DECL_STRUCT_FUNCTION (oldfn)->language; current_function_returns_value = ol->returns_value; current_function_returns_null = ol->returns_null; current_function_returns_abnormally = ol->returns_abnormally; current_function_infinite_loop = ol->infinite_loop; } /* [temp.deduct] A lambda-expression appearing in a function type or a template parameter is not considered part of the immediate context for the purposes of template argument deduction. */ complain = tf_warning_or_error; tsubst_expr (DECL_SAVED_TREE (oldfn), args, complain, r, /*constexpr*/false); finish_lambda_function (body); if (nested) pop_function_context (); else --function_depth; /* The capture list was built up in reverse order; fix that now. */ LAMBDA_EXPR_CAPTURE_LIST (r) = nreverse (LAMBDA_EXPR_CAPTURE_LIST (r)); LAMBDA_EXPR_THIS_CAPTURE (r) = NULL_TREE; maybe_add_lambda_conv_op (type); } out: finish_struct (type, /*attr*/NULL_TREE); insert_pending_capture_proxies (); return r; } /* Like tsubst but deals with expressions and performs semantic analysis. FUNCTION_P is true if T is the "F" in "F (ARGS)" or "F<TARGS> (ARGS)". */ tree tsubst_copy_and_build (tree t, tree args, tsubst_flags_t complain, tree in_decl, bool function_p, bool integral_constant_expression_p) { #define RETURN(EXP) do { retval = (EXP); goto out; } while(0) #define RECUR(NODE) \ tsubst_copy_and_build (NODE, args, complain, in_decl, \ /*function_p=*/false, \ integral_constant_expression_p) tree retval, op1; location_t save_loc; if (t == NULL_TREE || t == error_mark_node) return t; save_loc = input_location; if (location_t eloc = cp_expr_location (t)) input_location = eloc; /* N3276 decltype magic only applies to calls at the top level or on the right side of a comma. */ tsubst_flags_t decltype_flag = (complain & tf_decltype); complain &= ~tf_decltype; switch (TREE_CODE (t)) { case USING_DECL: t = DECL_NAME (t); /* Fall through. 
*/ case IDENTIFIER_NODE: { tree decl; cp_id_kind idk; bool non_integral_constant_expression_p; const char *error_msg; if (IDENTIFIER_CONV_OP_P (t)) { tree new_type = tsubst (TREE_TYPE (t), args, complain, in_decl); t = make_conv_op_name (new_type); } /* Look up the name. */ decl = lookup_name (t); /* By convention, expressions use ERROR_MARK_NODE to indicate failure, not NULL_TREE. */ if (decl == NULL_TREE) decl = error_mark_node; decl = finish_id_expression (t, decl, NULL_TREE, &idk, integral_constant_expression_p, /*allow_non_integral_constant_expression_p=*/(cxx_dialect >= cxx11), &non_integral_constant_expression_p, /*template_p=*/false, /*done=*/true, /*address_p=*/false, /*template_arg_p=*/false, &error_msg, input_location); if (error_msg) error (error_msg); if (!function_p && identifier_p (decl)) { if (complain & tf_error) unqualified_name_lookup_error (decl); decl = error_mark_node; } RETURN (decl); } case TEMPLATE_ID_EXPR: { tree object; tree templ = tsubst_copy_and_build (TREE_OPERAND (t, 0), args, complain, in_decl, function_p, integral_constant_expression_p); tree targs = TREE_OPERAND (t, 1); if (targs) targs = tsubst_template_args (targs, args, complain, in_decl); if (targs == error_mark_node) RETURN (error_mark_node); if (TREE_CODE (templ) == SCOPE_REF) { tree name = TREE_OPERAND (templ, 1); tree tid = lookup_template_function (name, targs); TREE_OPERAND (templ, 1) = tid; RETURN (templ); } if (concept_definition_p (templ)) { tree check = build_concept_check (templ, targs, complain); if (check == error_mark_node) RETURN (error_mark_node); tree id = unpack_concept_check (check); /* If we built a function concept check, return the underlying template-id. So we can evaluate it as a function call. */ if (function_concept_p (TREE_OPERAND (id, 0))) RETURN (id); RETURN (check); } if (variable_template_p (templ)) { tree r = lookup_and_finish_template_variable (templ, targs, complain); r = maybe_wrap_with_location (r, EXPR_LOCATION (t)); RETURN (r); } if (TREE_CODE (templ) == COMPONENT_REF) { object = TREE_OPERAND (templ, 0); templ = TREE_OPERAND (templ, 1); } else object = NULL_TREE; tree tid = lookup_template_function (templ, targs); if (object) RETURN (build3 (COMPONENT_REF, TREE_TYPE (tid), object, tid, NULL_TREE)); else if (identifier_p (templ)) { /* C++20 P0846: we can encounter an IDENTIFIER_NODE here when name lookup found nothing when parsing the template name. */ gcc_assert (cxx_dialect >= cxx2a || seen_error ()); RETURN (tid); } else RETURN (baselink_for_fns (tid)); } case INDIRECT_REF: { tree r = RECUR (TREE_OPERAND (t, 0)); if (REFERENCE_REF_P (t)) { /* A type conversion to reference type will be enclosed in such an indirect ref, but the substitution of the cast will have also added such an indirect ref. 
*/ r = convert_from_reference (r); } else r = build_x_indirect_ref (input_location, r, RO_UNARY_STAR, complain|decltype_flag); if (REF_PARENTHESIZED_P (t)) r = force_paren_expr (r); RETURN (r); } case NOP_EXPR: { tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); tree op0 = RECUR (TREE_OPERAND (t, 0)); RETURN (build_nop (type, op0)); } case IMPLICIT_CONV_EXPR: { tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); tree expr = RECUR (TREE_OPERAND (t, 0)); if (dependent_type_p (type) || type_dependent_expression_p (expr)) { retval = copy_node (t); TREE_TYPE (retval) = type; TREE_OPERAND (retval, 0) = expr; RETURN (retval); } if (IMPLICIT_CONV_EXPR_NONTYPE_ARG (t)) /* We'll pass this to convert_nontype_argument again, we don't need to actually perform any conversion here. */ RETURN (expr); int flags = LOOKUP_IMPLICIT; if (IMPLICIT_CONV_EXPR_DIRECT_INIT (t)) flags = LOOKUP_NORMAL; if (IMPLICIT_CONV_EXPR_BRACED_INIT (t)) flags |= LOOKUP_NO_NARROWING; RETURN (perform_implicit_conversion_flags (type, expr, complain, flags)); } case CONVERT_EXPR: { tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); tree op0 = RECUR (TREE_OPERAND (t, 0)); if (op0 == error_mark_node) RETURN (error_mark_node); RETURN (build1 (CONVERT_EXPR, type, op0)); } case CAST_EXPR: case REINTERPRET_CAST_EXPR: case CONST_CAST_EXPR: case DYNAMIC_CAST_EXPR: case STATIC_CAST_EXPR: { tree type; tree op, r = NULL_TREE; type = tsubst (TREE_TYPE (t), args, complain, in_decl); if (integral_constant_expression_p && !cast_valid_in_integral_constant_expression_p (type)) { if (complain & tf_error) error ("a cast to a type other than an integral or " "enumeration type cannot appear in a constant-expression"); RETURN (error_mark_node); } op = RECUR (TREE_OPERAND (t, 0)); warning_sentinel s(warn_useless_cast); warning_sentinel s2(warn_ignored_qualifiers); switch (TREE_CODE (t)) { case CAST_EXPR: r = build_functional_cast (input_location, type, op, complain); break; case REINTERPRET_CAST_EXPR: r = build_reinterpret_cast (input_location, type, op, complain); break; case CONST_CAST_EXPR: r = build_const_cast (input_location, type, op, complain); break; case DYNAMIC_CAST_EXPR: r = build_dynamic_cast (input_location, type, op, complain); break; case STATIC_CAST_EXPR: r = build_static_cast (input_location, type, op, complain); break; default: gcc_unreachable (); } RETURN (r); } case POSTDECREMENT_EXPR: case POSTINCREMENT_EXPR: op1 = tsubst_non_call_postfix_expression (TREE_OPERAND (t, 0), args, complain, in_decl); RETURN (build_x_unary_op (input_location, TREE_CODE (t), op1, complain|decltype_flag)); case PREDECREMENT_EXPR: case PREINCREMENT_EXPR: case NEGATE_EXPR: case BIT_NOT_EXPR: case ABS_EXPR: case TRUTH_NOT_EXPR: case UNARY_PLUS_EXPR: /* Unary + */ case REALPART_EXPR: case IMAGPART_EXPR: RETURN (build_x_unary_op (input_location, TREE_CODE (t), RECUR (TREE_OPERAND (t, 0)), complain|decltype_flag)); case FIX_TRUNC_EXPR: gcc_unreachable (); case ADDR_EXPR: op1 = TREE_OPERAND (t, 0); if (TREE_CODE (op1) == LABEL_DECL) RETURN (finish_label_address_expr (DECL_NAME (op1), EXPR_LOCATION (op1))); if (TREE_CODE (op1) == SCOPE_REF) op1 = tsubst_qualified_id (op1, args, complain, in_decl, /*done=*/true, /*address_p=*/true); else op1 = tsubst_non_call_postfix_expression (op1, args, complain, in_decl); RETURN (build_x_unary_op (input_location, ADDR_EXPR, op1, complain|decltype_flag)); case PLUS_EXPR: case MINUS_EXPR: case MULT_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case 
EXACT_DIV_EXPR: case BIT_AND_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: case TRUNC_MOD_EXPR: case FLOOR_MOD_EXPR: case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case RSHIFT_EXPR: case LSHIFT_EXPR: case EQ_EXPR: case NE_EXPR: case MAX_EXPR: case MIN_EXPR: case LE_EXPR: case GE_EXPR: case LT_EXPR: case GT_EXPR: case SPACESHIP_EXPR: case MEMBER_REF: case DOTSTAR_EXPR: { /* If T was type-dependent, suppress warnings that depend on the range of the types involved. */ ++processing_template_decl; const bool was_dep = (potential_constant_expression (t) ? value_dependent_expression_p (t) : type_dependent_expression_p (t)); --processing_template_decl; tree op0 = RECUR (TREE_OPERAND (t, 0)); tree op1 = RECUR (TREE_OPERAND (t, 1)); warning_sentinel s1(warn_type_limits, was_dep); warning_sentinel s2(warn_div_by_zero, was_dep); warning_sentinel s3(warn_logical_op, was_dep); warning_sentinel s4(warn_tautological_compare, was_dep); tree r = build_x_binary_op (input_location, TREE_CODE (t), op0, (TREE_NO_WARNING (TREE_OPERAND (t, 0)) ? ERROR_MARK : TREE_CODE (TREE_OPERAND (t, 0))), op1, (TREE_NO_WARNING (TREE_OPERAND (t, 1)) ? ERROR_MARK : TREE_CODE (TREE_OPERAND (t, 1))), /*overload=*/NULL, complain|decltype_flag); if (EXPR_P (r) && TREE_NO_WARNING (t)) TREE_NO_WARNING (r) = TREE_NO_WARNING (t); RETURN (r); } case POINTER_PLUS_EXPR: { tree op0 = RECUR (TREE_OPERAND (t, 0)); if (op0 == error_mark_node) RETURN (error_mark_node); tree op1 = RECUR (TREE_OPERAND (t, 1)); if (op1 == error_mark_node) RETURN (error_mark_node); RETURN (fold_build_pointer_plus (op0, op1)); } case SCOPE_REF: RETURN (tsubst_qualified_id (t, args, complain, in_decl, /*done=*/true, /*address_p=*/false)); case BASELINK: RETURN (tsubst_baselink (t, current_nonlambda_class_type (), args, complain, in_decl)); case ARRAY_REF: op1 = tsubst_non_call_postfix_expression (TREE_OPERAND (t, 0), args, complain, in_decl); RETURN (build_x_array_ref (EXPR_LOCATION (t), op1, RECUR (TREE_OPERAND (t, 1)), complain|decltype_flag)); case SIZEOF_EXPR: if (PACK_EXPANSION_P (TREE_OPERAND (t, 0)) || ARGUMENT_PACK_P (TREE_OPERAND (t, 0))) RETURN (tsubst_copy (t, args, complain, in_decl)); /* Fall through */ case ALIGNOF_EXPR: { tree r; op1 = TREE_OPERAND (t, 0); if (TREE_CODE (t) == SIZEOF_EXPR && SIZEOF_EXPR_TYPE_P (t)) op1 = TREE_TYPE (op1); bool std_alignof = (TREE_CODE (t) == ALIGNOF_EXPR && ALIGNOF_EXPR_STD_P (t)); if (!args) { /* When there are no ARGS, we are trying to evaluate a non-dependent expression from the parser. Trying to do the substitutions may not work. 
*/ if (!TYPE_P (op1)) op1 = TREE_TYPE (op1); } else { ++cp_unevaluated_operand; ++c_inhibit_evaluation_warnings; if (TYPE_P (op1)) op1 = tsubst (op1, args, complain, in_decl); else op1 = tsubst_copy_and_build (op1, args, complain, in_decl, /*function_p=*/false, /*integral_constant_expression_p=*/ false); --cp_unevaluated_operand; --c_inhibit_evaluation_warnings; } if (TYPE_P (op1)) r = cxx_sizeof_or_alignof_type (input_location, op1, TREE_CODE (t), std_alignof, complain & tf_error); else r = cxx_sizeof_or_alignof_expr (input_location, op1, TREE_CODE (t), complain & tf_error); if (TREE_CODE (t) == SIZEOF_EXPR && r != error_mark_node) { if (TREE_CODE (r) != SIZEOF_EXPR || TYPE_P (op1)) { if (!processing_template_decl && TYPE_P (op1)) { r = build_min (SIZEOF_EXPR, size_type_node, build1 (NOP_EXPR, op1, error_mark_node)); SIZEOF_EXPR_TYPE_P (r) = 1; } else r = build_min (SIZEOF_EXPR, size_type_node, op1); TREE_SIDE_EFFECTS (r) = 0; TREE_READONLY (r) = 1; } SET_EXPR_LOCATION (r, EXPR_LOCATION (t)); } RETURN (r); } case AT_ENCODE_EXPR: { op1 = TREE_OPERAND (t, 0); ++cp_unevaluated_operand; ++c_inhibit_evaluation_warnings; op1 = tsubst_copy_and_build (op1, args, complain, in_decl, /*function_p=*/false, /*integral_constant_expression_p=*/false); --cp_unevaluated_operand; --c_inhibit_evaluation_warnings; RETURN (objc_build_encode_expr (op1)); } case NOEXCEPT_EXPR: op1 = TREE_OPERAND (t, 0); ++cp_unevaluated_operand; ++c_inhibit_evaluation_warnings; ++cp_noexcept_operand; op1 = tsubst_copy_and_build (op1, args, complain, in_decl, /*function_p=*/false, /*integral_constant_expression_p=*/false); --cp_unevaluated_operand; --c_inhibit_evaluation_warnings; --cp_noexcept_operand; RETURN (finish_noexcept_expr (op1, complain)); case MODOP_EXPR: { warning_sentinel s(warn_div_by_zero); tree lhs = RECUR (TREE_OPERAND (t, 0)); tree rhs = RECUR (TREE_OPERAND (t, 2)); tree r = build_x_modify_expr (EXPR_LOCATION (t), lhs, TREE_CODE (TREE_OPERAND (t, 1)), rhs, complain|decltype_flag); /* TREE_NO_WARNING must be set if either the expression was parenthesized or it uses an operator such as >>= rather than plain assignment. In the former case, it was already set and must be copied. In the latter case, build_x_modify_expr sets it and it must not be reset here. */ if (TREE_NO_WARNING (t)) TREE_NO_WARNING (r) = TREE_NO_WARNING (t); RETURN (r); } case ARROW_EXPR: op1 = tsubst_non_call_postfix_expression (TREE_OPERAND (t, 0), args, complain, in_decl); /* Remember that there was a reference to this entity. */ if (DECL_P (op1) && !mark_used (op1, complain) && !(complain & tf_error)) RETURN (error_mark_node); RETURN (build_x_arrow (input_location, op1, complain)); case NEW_EXPR: { tree placement = RECUR (TREE_OPERAND (t, 0)); tree init = RECUR (TREE_OPERAND (t, 3)); vec<tree, va_gc> *placement_vec; vec<tree, va_gc> *init_vec; tree ret; location_t loc = EXPR_LOCATION (t); if (placement == NULL_TREE) placement_vec = NULL; else { placement_vec = make_tree_vector (); for (; placement != NULL_TREE; placement = TREE_CHAIN (placement)) vec_safe_push (placement_vec, TREE_VALUE (placement)); } /* If there was an initializer in the original tree, but it instantiated to an empty list, then we should pass a non-NULL empty vector to tell build_new that it was an empty initializer() rather than no initializer. This can only happen when the initializer is a pack expansion whose parameter packs are of length zero. 
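   For example (hypothetical user code, shown only to illustrate the
   distinction):

     template <typename T, typename... Args>
     T *make (Args... args)
     {
       return new T (args...);
     }

   make<int> () must behave like "new int ()" (value-initialization), not
   like "new int", even though the expanded initializer list is empty.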
*/ if (init == NULL_TREE && TREE_OPERAND (t, 3) == NULL_TREE) init_vec = NULL; else { init_vec = make_tree_vector (); if (init == void_node) gcc_assert (init_vec != NULL); else { for (; init != NULL_TREE; init = TREE_CHAIN (init)) vec_safe_push (init_vec, TREE_VALUE (init)); } } /* Avoid passing an enclosing decl to valid_array_size_p. */ in_decl = NULL_TREE; tree op1 = tsubst (TREE_OPERAND (t, 1), args, complain, in_decl); tree op2 = RECUR (TREE_OPERAND (t, 2)); ret = build_new (loc, &placement_vec, op1, op2, &init_vec, NEW_EXPR_USE_GLOBAL (t), complain); if (placement_vec != NULL) release_tree_vector (placement_vec); if (init_vec != NULL) release_tree_vector (init_vec); RETURN (ret); } case DELETE_EXPR: { tree op0 = RECUR (TREE_OPERAND (t, 0)); tree op1 = RECUR (TREE_OPERAND (t, 1)); RETURN (delete_sanity (input_location, op0, op1, DELETE_EXPR_USE_VEC (t), DELETE_EXPR_USE_GLOBAL (t), complain)); } case COMPOUND_EXPR: { tree op0 = tsubst_copy_and_build (TREE_OPERAND (t, 0), args, complain & ~tf_decltype, in_decl, /*function_p=*/false, integral_constant_expression_p); RETURN (build_x_compound_expr (EXPR_LOCATION (t), op0, RECUR (TREE_OPERAND (t, 1)), complain|decltype_flag)); } case CALL_EXPR: { tree function; unsigned int nargs, i; bool qualified_p; bool koenig_p; tree ret; function = CALL_EXPR_FN (t); /* Internal function with no arguments. */ if (function == NULL_TREE && call_expr_nargs (t) == 0) RETURN (t); /* When we parsed the expression, we determined whether or not Koenig lookup should be performed. */ koenig_p = KOENIG_LOOKUP_P (t); if (function == NULL_TREE) { koenig_p = false; qualified_p = false; } else if (TREE_CODE (function) == SCOPE_REF) { qualified_p = true; function = tsubst_qualified_id (function, args, complain, in_decl, /*done=*/false, /*address_p=*/false); } else if (koenig_p && identifier_p (function)) { /* Do nothing; calling tsubst_copy_and_build on an identifier would incorrectly perform unqualified lookup again. Note that we can also have an IDENTIFIER_NODE if the earlier unqualified lookup found a member function; in that case koenig_p will be false and we do want to do the lookup again to find the instantiated member function. FIXME but doing that causes c++/15272, so we need to stop using IDENTIFIER_NODE in that situation. */ qualified_p = false; } else { if (TREE_CODE (function) == COMPONENT_REF) { tree op = TREE_OPERAND (function, 1); qualified_p = (TREE_CODE (op) == SCOPE_REF || (BASELINK_P (op) && BASELINK_QUALIFIED_P (op))); } else qualified_p = false; if (TREE_CODE (function) == ADDR_EXPR && TREE_CODE (TREE_OPERAND (function, 0)) == FUNCTION_DECL) /* Avoid error about taking the address of a constructor. */ function = TREE_OPERAND (function, 0); function = tsubst_copy_and_build (function, args, complain, in_decl, !qualified_p, integral_constant_expression_p); if (BASELINK_P (function)) qualified_p = true; } nargs = call_expr_nargs (t); releasing_vec call_args; for (i = 0; i < nargs; ++i) { tree arg = CALL_EXPR_ARG (t, i); if (!PACK_EXPANSION_P (arg)) vec_safe_push (call_args, RECUR (CALL_EXPR_ARG (t, i))); else { /* Expand the pack expansion and push each entry onto CALL_ARGS. */ arg = tsubst_pack_expansion (arg, args, complain, in_decl); if (TREE_CODE (arg) == TREE_VEC) { unsigned int len, j; len = TREE_VEC_LENGTH (arg); for (j = 0; j < len; ++j) { tree value = TREE_VEC_ELT (arg, j); if (value != NULL_TREE) value = convert_from_reference (value); vec_safe_push (call_args, value); } } else { /* A partial substitution. Add one entry. 
*/ vec_safe_push (call_args, arg); } } } /* Stripped-down processing for a call in a thunk. Specifically, in the thunk template for a generic lambda. */ if (CALL_FROM_THUNK_P (t)) { /* Now that we've expanded any packs, the number of call args might be different. */ unsigned int cargs = call_args->length (); tree thisarg = NULL_TREE; if (TREE_CODE (function) == COMPONENT_REF) { thisarg = TREE_OPERAND (function, 0); if (TREE_CODE (thisarg) == INDIRECT_REF) thisarg = TREE_OPERAND (thisarg, 0); function = TREE_OPERAND (function, 1); if (TREE_CODE (function) == BASELINK) function = BASELINK_FUNCTIONS (function); } /* We aren't going to do normal overload resolution, so force the template-id to resolve. */ function = resolve_nondeduced_context (function, complain); for (unsigned i = 0; i < cargs; ++i) { /* In a thunk, pass through args directly, without any conversions. */ tree arg = (*call_args)[i]; while (TREE_CODE (arg) != PARM_DECL) arg = TREE_OPERAND (arg, 0); (*call_args)[i] = arg; } if (thisarg) { /* If there are no other args, just push 'this'. */ if (cargs == 0) vec_safe_push (call_args, thisarg); else { /* Otherwise, shift the other args over to make room. */ tree last = (*call_args)[cargs - 1]; vec_safe_push (call_args, last); for (int i = cargs - 1; i > 0; --i) (*call_args)[i] = (*call_args)[i - 1]; (*call_args)[0] = thisarg; } } ret = build_call_a (function, call_args->length (), call_args->address ()); /* The thunk location is not interesting. */ SET_EXPR_LOCATION (ret, UNKNOWN_LOCATION); CALL_FROM_THUNK_P (ret) = true; if (CLASS_TYPE_P (TREE_TYPE (ret))) CALL_EXPR_RETURN_SLOT_OPT (ret) = true; RETURN (ret); } /* We do not perform argument-dependent lookup if normal lookup finds a non-function, in accordance with the resolution of DR 218. */ if (koenig_p && ((is_overloaded_fn (function) /* If lookup found a member function, the Koenig lookup is not appropriate, even if an unqualified-name was used to denote the function. */ && !DECL_FUNCTION_MEMBER_P (get_first_fn (function))) || identifier_p (function) /* C++20 P0846: Lookup found nothing. */ || (TREE_CODE (function) == TEMPLATE_ID_EXPR && identifier_p (TREE_OPERAND (function, 0)))) /* Only do this when substitution turns a dependent call into a non-dependent call. */ && type_dependent_expression_p_push (t) && !any_type_dependent_arguments_p (call_args)) function = perform_koenig_lookup (function, call_args, tf_none); if (function != NULL_TREE && (identifier_p (function) || (TREE_CODE (function) == TEMPLATE_ID_EXPR && identifier_p (TREE_OPERAND (function, 0)))) && !any_type_dependent_arguments_p (call_args)) { if (TREE_CODE (function) == TEMPLATE_ID_EXPR) function = TREE_OPERAND (function, 0); if (koenig_p && (complain & tf_warning_or_error)) { /* For backwards compatibility and good diagnostics, try the unqualified lookup again if we aren't in SFINAE context. */ tree unq = (tsubst_copy_and_build (function, args, complain, in_decl, true, integral_constant_expression_p)); if (unq == error_mark_node) RETURN (error_mark_node); if (unq != function) { /* In a lambda fn, we have to be careful to not introduce new this captures. Legacy code can't be using lambdas anyway, so it's ok to be stricter. 
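   The compatibility case being handled looks roughly like this
   (hypothetical user code):

     template <typename T> void call_frob (T t) { frob (t); }
     void frob (int);               // only declared later in the TU
     void use () { call_frob (1); } // int has no associated namespaces,
                                    // so ADL finds nothing here

   Outside a lambda this gets the permerror below; inside a lambda body we
   give a hard error instead, since the late-found declaration might be a
   member function whose use would need a new 'this' capture.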
*/ bool in_lambda = (current_class_type && LAMBDA_TYPE_P (current_class_type)); char const *const msg = G_("%qD was not declared in this scope, " "and no declarations were found by " "argument-dependent lookup at the point " "of instantiation"); bool diag = true; if (in_lambda) error_at (cp_expr_loc_or_input_loc (t), msg, function); else diag = permerror (cp_expr_loc_or_input_loc (t), msg, function); if (diag) { tree fn = unq; if (INDIRECT_REF_P (fn)) fn = TREE_OPERAND (fn, 0); if (is_overloaded_fn (fn)) fn = get_first_fn (fn); if (!DECL_P (fn)) /* Can't say anything more. */; else if (DECL_CLASS_SCOPE_P (fn)) { location_t loc = cp_expr_loc_or_input_loc (t); inform (loc, "declarations in dependent base %qT are " "not found by unqualified lookup", DECL_CLASS_CONTEXT (fn)); if (current_class_ptr) inform (loc, "use %<this->%D%> instead", function); else inform (loc, "use %<%T::%D%> instead", current_class_name, function); } else inform (DECL_SOURCE_LOCATION (fn), "%qD declared here, later in the " "translation unit", fn); if (in_lambda) RETURN (error_mark_node); } function = unq; } } if (identifier_p (function)) { if (complain & tf_error) unqualified_name_lookup_error (function); RETURN (error_mark_node); } } /* Remember that there was a reference to this entity. */ if (function != NULL_TREE && DECL_P (function) && !mark_used (function, complain) && !(complain & tf_error)) RETURN (error_mark_node); /* Put back tf_decltype for the actual call. */ complain |= decltype_flag; if (function == NULL_TREE) switch (CALL_EXPR_IFN (t)) { case IFN_LAUNDER: gcc_assert (nargs == 1); if (vec_safe_length (call_args) != 1) { error_at (cp_expr_loc_or_input_loc (t), "wrong number of arguments to " "%<__builtin_launder%>"); ret = error_mark_node; } else ret = finish_builtin_launder (cp_expr_loc_or_input_loc (t), (*call_args)[0], complain); break; case IFN_VEC_CONVERT: gcc_assert (nargs == 1); if (vec_safe_length (call_args) != 1) { error_at (cp_expr_loc_or_input_loc (t), "wrong number of arguments to " "%<__builtin_convertvector%>"); ret = error_mark_node; break; } ret = cp_build_vec_convert ((*call_args)[0], input_location, tsubst (TREE_TYPE (t), args, complain, in_decl), complain); if (TREE_CODE (ret) == VIEW_CONVERT_EXPR) RETURN (ret); break; default: /* Unsupported internal function with arguments. */ gcc_unreachable (); } else if (TREE_CODE (function) == OFFSET_REF || TREE_CODE (function) == DOTSTAR_EXPR || TREE_CODE (function) == MEMBER_REF) ret = build_offset_ref_call_from_tree (function, &call_args, complain); else if (TREE_CODE (function) == COMPONENT_REF) { tree instance = TREE_OPERAND (function, 0); tree fn = TREE_OPERAND (function, 1); if (processing_template_decl && (type_dependent_expression_p (instance) || (!BASELINK_P (fn) && TREE_CODE (fn) != FIELD_DECL) || type_dependent_expression_p (fn) || any_type_dependent_arguments_p (call_args))) ret = build_min_nt_call_vec (function, call_args); else if (!BASELINK_P (fn)) ret = finish_call_expr (function, &call_args, /*disallow_virtual=*/false, /*koenig_p=*/false, complain); else ret = (build_new_method_call (instance, fn, &call_args, NULL_TREE, qualified_p ? LOOKUP_NONVIRTUAL : LOOKUP_NORMAL, /*fn_p=*/NULL, complain)); } else if (concept_check_p (function)) { /* FUNCTION is a template-id referring to a concept definition. */ tree id = unpack_concept_check (function); tree tmpl = TREE_OPERAND (id, 0); tree args = TREE_OPERAND (id, 1); /* Calls to standard and variable concepts should have been previously diagnosed. 
*/ gcc_assert (function_concept_p (tmpl)); /* Ensure the result is wrapped as a call expression. */ ret = build_concept_check (tmpl, args, tf_warning_or_error); } else ret = finish_call_expr (function, &call_args, /*disallow_virtual=*/qualified_p, koenig_p, complain); if (ret != error_mark_node) { bool op = CALL_EXPR_OPERATOR_SYNTAX (t); bool ord = CALL_EXPR_ORDERED_ARGS (t); bool rev = CALL_EXPR_REVERSE_ARGS (t); if (op || ord || rev) { function = extract_call_expr (ret); CALL_EXPR_OPERATOR_SYNTAX (function) = op; CALL_EXPR_ORDERED_ARGS (function) = ord; CALL_EXPR_REVERSE_ARGS (function) = rev; } } RETURN (ret); } case COND_EXPR: { tree cond = RECUR (TREE_OPERAND (t, 0)); cond = mark_rvalue_use (cond); tree folded_cond = fold_non_dependent_expr (cond, complain); tree exp1, exp2; if (TREE_CODE (folded_cond) == INTEGER_CST) { if (integer_zerop (folded_cond)) { ++c_inhibit_evaluation_warnings; exp1 = RECUR (TREE_OPERAND (t, 1)); --c_inhibit_evaluation_warnings; exp2 = RECUR (TREE_OPERAND (t, 2)); } else { exp1 = RECUR (TREE_OPERAND (t, 1)); ++c_inhibit_evaluation_warnings; exp2 = RECUR (TREE_OPERAND (t, 2)); --c_inhibit_evaluation_warnings; } cond = folded_cond; } else { exp1 = RECUR (TREE_OPERAND (t, 1)); exp2 = RECUR (TREE_OPERAND (t, 2)); } warning_sentinel s(warn_duplicated_branches); RETURN (build_x_conditional_expr (EXPR_LOCATION (t), cond, exp1, exp2, complain)); } case PSEUDO_DTOR_EXPR: { tree op0 = RECUR (TREE_OPERAND (t, 0)); tree op1 = RECUR (TREE_OPERAND (t, 1)); tree op2 = tsubst (TREE_OPERAND (t, 2), args, complain, in_decl); RETURN (finish_pseudo_destructor_expr (op0, op1, op2, input_location)); } case TREE_LIST: { tree purpose, value, chain; if (t == void_list_node) RETURN (t); if ((TREE_PURPOSE (t) && PACK_EXPANSION_P (TREE_PURPOSE (t))) || (TREE_VALUE (t) && PACK_EXPANSION_P (TREE_VALUE (t)))) { /* We have pack expansions, so expand those and create a new list out of it. */ tree purposevec = NULL_TREE; tree valuevec = NULL_TREE; tree chain; int i, len = -1; /* Expand the argument expressions. */ if (TREE_PURPOSE (t)) purposevec = tsubst_pack_expansion (TREE_PURPOSE (t), args, complain, in_decl); if (TREE_VALUE (t)) valuevec = tsubst_pack_expansion (TREE_VALUE (t), args, complain, in_decl); /* Build the rest of the list. */ chain = TREE_CHAIN (t); if (chain && chain != void_type_node) chain = RECUR (chain); /* Determine the number of arguments. */ if (purposevec && TREE_CODE (purposevec) == TREE_VEC) { len = TREE_VEC_LENGTH (purposevec); gcc_assert (!valuevec || len == TREE_VEC_LENGTH (valuevec)); } else if (TREE_CODE (valuevec) == TREE_VEC) len = TREE_VEC_LENGTH (valuevec); else { /* Since we only performed a partial substitution into the argument pack, we only RETURN (a single list node. */ if (purposevec == TREE_PURPOSE (t) && valuevec == TREE_VALUE (t) && chain == TREE_CHAIN (t)) RETURN (t); RETURN (tree_cons (purposevec, valuevec, chain)); } /* Convert the argument vectors into a TREE_LIST */ i = len; while (i > 0) { /* Grab the Ith values. */ i--; purpose = purposevec ? TREE_VEC_ELT (purposevec, i) : NULL_TREE; value = valuevec ? convert_from_reference (TREE_VEC_ELT (valuevec, i)) : NULL_TREE; /* Build the list (backwards). 
*/ chain = tree_cons (purpose, value, chain); } RETURN (chain); } purpose = TREE_PURPOSE (t); if (purpose) purpose = RECUR (purpose); value = TREE_VALUE (t); if (value) value = RECUR (value); chain = TREE_CHAIN (t); if (chain && chain != void_type_node) chain = RECUR (chain); if (purpose == TREE_PURPOSE (t) && value == TREE_VALUE (t) && chain == TREE_CHAIN (t)) RETURN (t); RETURN (tree_cons (purpose, value, chain)); } case COMPONENT_REF: { tree object; tree object_type; tree member; tree r; object = tsubst_non_call_postfix_expression (TREE_OPERAND (t, 0), args, complain, in_decl); /* Remember that there was a reference to this entity. */ if (DECL_P (object) && !mark_used (object, complain) && !(complain & tf_error)) RETURN (error_mark_node); object_type = TREE_TYPE (object); member = TREE_OPERAND (t, 1); if (BASELINK_P (member)) member = tsubst_baselink (member, non_reference (TREE_TYPE (object)), args, complain, in_decl); else member = tsubst_copy (member, args, complain, in_decl); if (member == error_mark_node) RETURN (error_mark_node); if (TREE_CODE (member) == FIELD_DECL) { r = finish_non_static_data_member (member, object, NULL_TREE); if (TREE_CODE (r) == COMPONENT_REF) REF_PARENTHESIZED_P (r) = REF_PARENTHESIZED_P (t); RETURN (r); } else if (type_dependent_expression_p (object)) /* We can't do much here. */; else if (!CLASS_TYPE_P (object_type)) { if (scalarish_type_p (object_type)) { tree s = NULL_TREE; tree dtor = member; if (TREE_CODE (dtor) == SCOPE_REF) { s = TREE_OPERAND (dtor, 0); dtor = TREE_OPERAND (dtor, 1); } if (TREE_CODE (dtor) == BIT_NOT_EXPR) { dtor = TREE_OPERAND (dtor, 0); if (TYPE_P (dtor)) RETURN (finish_pseudo_destructor_expr (object, s, dtor, input_location)); } } } else if (TREE_CODE (member) == SCOPE_REF && TREE_CODE (TREE_OPERAND (member, 1)) == TEMPLATE_ID_EXPR) { /* Lookup the template functions now that we know what the scope is. */ tree scope = TREE_OPERAND (member, 0); tree tmpl = TREE_OPERAND (TREE_OPERAND (member, 1), 0); tree args = TREE_OPERAND (TREE_OPERAND (member, 1), 1); member = lookup_qualified_name (scope, tmpl, /*is_type_p=*/false, /*complain=*/false); if (BASELINK_P (member)) { BASELINK_FUNCTIONS (member) = build_nt (TEMPLATE_ID_EXPR, BASELINK_FUNCTIONS (member), args); member = (adjust_result_of_qualified_name_lookup (member, BINFO_TYPE (BASELINK_BINFO (member)), object_type)); } else { qualified_name_lookup_error (scope, tmpl, member, input_location); RETURN (error_mark_node); } } else if (TREE_CODE (member) == SCOPE_REF && !CLASS_TYPE_P (TREE_OPERAND (member, 0)) && TREE_CODE (TREE_OPERAND (member, 0)) != NAMESPACE_DECL) { if (complain & tf_error) { if (TYPE_P (TREE_OPERAND (member, 0))) error ("%qT is not a class or namespace", TREE_OPERAND (member, 0)); else error ("%qD is not a class or namespace", TREE_OPERAND (member, 0)); } RETURN (error_mark_node); } r = finish_class_member_access_expr (object, member, /*template_p=*/false, complain); if (TREE_CODE (r) == COMPONENT_REF) REF_PARENTHESIZED_P (r) = REF_PARENTHESIZED_P (t); RETURN (r); } case THROW_EXPR: RETURN (build_throw (input_location, RECUR (TREE_OPERAND (t, 0)))); case CONSTRUCTOR: { vec<constructor_elt, va_gc> *n; constructor_elt *ce; unsigned HOST_WIDE_INT idx; tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); bool process_index_p; int newlen; bool need_copy_p = false; tree r; if (type == error_mark_node) RETURN (error_mark_node); /* We do not want to process the index of aggregate initializers as they are identifier nodes which will be looked up by digest_init. 
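   E.g. (illustrative user code, assuming designated initializers):

     struct point { int x, y; };
     template <typename T>
     point make_point (T v) { return { .x = v, .y = 0 }; }

   Here "x" and "y" name members of the type being initialized, not
   anything visible in the template's own scope, so substituting into them
   here would be wrong.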
*/ process_index_p = !(type && MAYBE_CLASS_TYPE_P (type)); if (null_member_pointer_value_p (t)) { gcc_assert (same_type_p (type, TREE_TYPE (t))); RETURN (t); } n = vec_safe_copy (CONSTRUCTOR_ELTS (t)); newlen = vec_safe_length (n); FOR_EACH_VEC_SAFE_ELT (n, idx, ce) { if (ce->index && process_index_p /* An identifier index is looked up in the type being initialized, not the current scope. */ && TREE_CODE (ce->index) != IDENTIFIER_NODE) ce->index = RECUR (ce->index); if (PACK_EXPANSION_P (ce->value)) { /* Substitute into the pack expansion. */ ce->value = tsubst_pack_expansion (ce->value, args, complain, in_decl); if (ce->value == error_mark_node || PACK_EXPANSION_P (ce->value)) ; else if (TREE_VEC_LENGTH (ce->value) == 1) /* Just move the argument into place. */ ce->value = TREE_VEC_ELT (ce->value, 0); else { /* Update the length of the final CONSTRUCTOR arguments vector, and note that we will need to copy.*/ newlen = newlen + TREE_VEC_LENGTH (ce->value) - 1; need_copy_p = true; } } else ce->value = RECUR (ce->value); } if (need_copy_p) { vec<constructor_elt, va_gc> *old_n = n; vec_alloc (n, newlen); FOR_EACH_VEC_ELT (*old_n, idx, ce) { if (TREE_CODE (ce->value) == TREE_VEC) { int i, len = TREE_VEC_LENGTH (ce->value); for (i = 0; i < len; ++i) CONSTRUCTOR_APPEND_ELT (n, 0, TREE_VEC_ELT (ce->value, i)); } else CONSTRUCTOR_APPEND_ELT (n, 0, ce->value); } } r = build_constructor (init_list_type_node, n); CONSTRUCTOR_IS_DIRECT_INIT (r) = CONSTRUCTOR_IS_DIRECT_INIT (t); CONSTRUCTOR_IS_DESIGNATED_INIT (r) = CONSTRUCTOR_IS_DESIGNATED_INIT (t); if (TREE_HAS_CONSTRUCTOR (t)) { fcl_t cl = fcl_functional; if (CONSTRUCTOR_C99_COMPOUND_LITERAL (t)) cl = fcl_c99; RETURN (finish_compound_literal (type, r, complain, cl)); } TREE_TYPE (r) = type; RETURN (r); } case TYPEID_EXPR: { tree operand_0 = TREE_OPERAND (t, 0); if (TYPE_P (operand_0)) { operand_0 = tsubst (operand_0, args, complain, in_decl); RETURN (get_typeid (operand_0, complain)); } else { operand_0 = RECUR (operand_0); RETURN (build_typeid (operand_0, complain)); } } case VAR_DECL: if (!args) RETURN (t); /* Fall through */ case PARM_DECL: { tree r = tsubst_copy (t, args, complain, in_decl); /* ??? We're doing a subset of finish_id_expression here. */ if (tree wrap = maybe_get_tls_wrapper_call (r)) /* Replace an evaluated use of the thread_local variable with a call to its wrapper. */ r = wrap; else if (outer_automatic_var_p (r)) r = process_outer_var_ref (r, complain); if (!TYPE_REF_P (TREE_TYPE (t))) /* If the original type was a reference, we'll be wrapped in the appropriate INDIRECT_REF. 
*/ r = convert_from_reference (r); RETURN (r); } case VA_ARG_EXPR: { tree op0 = RECUR (TREE_OPERAND (t, 0)); tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); RETURN (build_x_va_arg (EXPR_LOCATION (t), op0, type)); } case OFFSETOF_EXPR: { tree object_ptr = tsubst_copy_and_build (TREE_OPERAND (t, 1), args, complain, in_decl, /*function_p=*/false, /*integral_constant_expression_p=*/false); RETURN (finish_offsetof (object_ptr, RECUR (TREE_OPERAND (t, 0)), EXPR_LOCATION (t))); } case ADDRESSOF_EXPR: RETURN (cp_build_addressof (EXPR_LOCATION (t), RECUR (TREE_OPERAND (t, 0)), complain)); case TRAIT_EXPR: { tree type1 = tsubst (TRAIT_EXPR_TYPE1 (t), args, complain, in_decl); tree type2 = tsubst (TRAIT_EXPR_TYPE2 (t), args, complain, in_decl); RETURN (finish_trait_expr (TRAIT_EXPR_LOCATION (t), TRAIT_EXPR_KIND (t), type1, type2)); } case STMT_EXPR: { tree old_stmt_expr = cur_stmt_expr; tree stmt_expr = begin_stmt_expr (); cur_stmt_expr = stmt_expr; tsubst_expr (STMT_EXPR_STMT (t), args, complain, in_decl, integral_constant_expression_p); stmt_expr = finish_stmt_expr (stmt_expr, false); cur_stmt_expr = old_stmt_expr; /* If the resulting list of expression statement is empty, fold it further into void_node. */ if (empty_expr_stmt_p (stmt_expr)) stmt_expr = void_node; RETURN (stmt_expr); } case LAMBDA_EXPR: { if (complain & tf_partial) { /* We don't have a full set of template arguments yet; don't touch the lambda at all. */ gcc_assert (processing_template_decl); return t; } tree r = tsubst_lambda_expr (t, args, complain, in_decl); RETURN (build_lambda_object (r)); } case TARGET_EXPR: /* We can get here for a constant initializer of non-dependent type. FIXME stop folding in cp_parser_initializer_clause. */ { tree r = get_target_expr_sfinae (RECUR (TARGET_EXPR_INITIAL (t)), complain); RETURN (r); } case TRANSACTION_EXPR: RETURN (tsubst_expr(t, args, complain, in_decl, integral_constant_expression_p)); case PAREN_EXPR: RETURN (finish_parenthesized_expr (RECUR (TREE_OPERAND (t, 0)))); case VEC_PERM_EXPR: { tree op0 = RECUR (TREE_OPERAND (t, 0)); tree op1 = RECUR (TREE_OPERAND (t, 1)); tree op2 = RECUR (TREE_OPERAND (t, 2)); RETURN (build_x_vec_perm_expr (input_location, op0, op1, op2, complain)); } case REQUIRES_EXPR: { tree r = tsubst_requires_expr (t, args, tf_none, in_decl); RETURN (r); } case RANGE_EXPR: /* No need to substitute further, a RANGE_EXPR will always be built with constant operands. */ RETURN (t); case NON_LVALUE_EXPR: case VIEW_CONVERT_EXPR: if (location_wrapper_p (t)) /* We need to do this here as well as in tsubst_copy so we get the other tsubst_copy_and_build semantics for a PARM_DECL operand. */ RETURN (maybe_wrap_with_location (RECUR (TREE_OPERAND (t, 0)), EXPR_LOCATION (t))); /* fallthrough. */ default: /* Handle Objective-C++ constructs, if appropriate. */ { tree subst = objcp_tsubst_copy_and_build (t, args, complain, in_decl, /*function_p=*/false); if (subst) RETURN (subst); } RETURN (tsubst_copy (t, args, complain, in_decl)); } #undef RECUR #undef RETURN out: input_location = save_loc; return retval; } /* Verify that the instantiated ARGS are valid. For type arguments, make sure that the type's linkage is ok. For non-type arguments, make sure they are constants if they are integral or enumerations. Emit an error under control of COMPLAIN, and return TRUE on error. 
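   Two illustrative rejections (hypothetical user code):

     template <typename T> struct wrap { };
     void f ()
     {
       struct local { };
       wrap<local> w;  // C++98: "local" has no linkage (OK since C++11, DR 757)
     }

     template <int N> struct arr { };
     extern int n;
     arr<n> a;         // error: integral argument is not a constant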
*/ static bool check_instantiated_arg (tree tmpl, tree t, tsubst_flags_t complain) { if (dependent_template_arg_p (t)) return false; if (ARGUMENT_PACK_P (t)) { tree vec = ARGUMENT_PACK_ARGS (t); int len = TREE_VEC_LENGTH (vec); bool result = false; int i; for (i = 0; i < len; ++i) if (check_instantiated_arg (tmpl, TREE_VEC_ELT (vec, i), complain)) result = true; return result; } else if (TYPE_P (t)) { /* [basic.link]: A name with no linkage (notably, the name of a class or enumeration declared in a local scope) shall not be used to declare an entity with linkage. This implies that names with no linkage cannot be used as template arguments DR 757 relaxes this restriction for C++0x. */ tree nt = (cxx_dialect > cxx98 ? NULL_TREE : no_linkage_check (t, /*relaxed_p=*/false)); if (nt) { /* DR 488 makes use of a type with no linkage cause type deduction to fail. */ if (complain & tf_error) { if (TYPE_UNNAMED_P (nt)) error ("%qT is/uses unnamed type", t); else error ("template argument for %qD uses local type %qT", tmpl, t); } return true; } /* In order to avoid all sorts of complications, we do not allow variably-modified types as template arguments. */ else if (variably_modified_type_p (t, NULL_TREE)) { if (complain & tf_error) error ("%qT is a variably modified type", t); return true; } } /* Class template and alias template arguments should be OK. */ else if (DECL_TYPE_TEMPLATE_P (t)) ; /* A non-type argument of integral or enumerated type must be a constant. */ else if (TREE_TYPE (t) && INTEGRAL_OR_ENUMERATION_TYPE_P (TREE_TYPE (t)) && !REFERENCE_REF_P (t) && !TREE_CONSTANT (t)) { if (complain & tf_error) error ("integral expression %qE is not constant", t); return true; } return false; } static bool check_instantiated_args (tree tmpl, tree args, tsubst_flags_t complain) { int ix, len = DECL_NTPARMS (tmpl); bool result = false; for (ix = 0; ix != len; ix++) { if (check_instantiated_arg (tmpl, TREE_VEC_ELT (args, ix), complain)) result = true; } if (result && (complain & tf_error)) error (" trying to instantiate %qD", tmpl); return result; } /* We're out of SFINAE context now, so generate diagnostics for the access errors we saw earlier when instantiating D from TMPL and ARGS. */ static void recheck_decl_substitution (tree d, tree tmpl, tree args) { tree pattern = DECL_TEMPLATE_RESULT (tmpl); tree type = TREE_TYPE (pattern); location_t loc = input_location; push_access_scope (d); push_deferring_access_checks (dk_no_deferred); input_location = DECL_SOURCE_LOCATION (pattern); tsubst (type, args, tf_warning_or_error, d); input_location = loc; pop_deferring_access_checks (); pop_access_scope (d); } /* Instantiate the indicated variable, function, or alias template TMPL with the template arguments in TARG_PTR. */ static tree instantiate_template_1 (tree tmpl, tree orig_args, tsubst_flags_t complain) { tree targ_ptr = orig_args; tree fndecl; tree gen_tmpl; tree spec; bool access_ok = true; if (tmpl == error_mark_node) return error_mark_node; gcc_assert (TREE_CODE (tmpl) == TEMPLATE_DECL); /* If this function is a clone, handle it specially. */ if (DECL_CLONED_FUNCTION_P (tmpl)) { tree spec; tree clone; /* Use DECL_ABSTRACT_ORIGIN because only FUNCTION_DECLs have DECL_CLONED_FUNCTION. */ spec = instantiate_template (DECL_ABSTRACT_ORIGIN (tmpl), targ_ptr, complain); if (spec == error_mark_node) return error_mark_node; /* Look for the clone. */ FOR_EACH_CLONE (clone, spec) if (DECL_NAME (clone) == DECL_NAME (tmpl)) return clone; /* We should always have found the clone by now. 
*/ gcc_unreachable (); return NULL_TREE; } if (targ_ptr == error_mark_node) return error_mark_node; /* Check to see if we already have this specialization. */ gen_tmpl = most_general_template (tmpl); if (TMPL_ARGS_DEPTH (targ_ptr) < TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (gen_tmpl))) /* targ_ptr only has the innermost template args, so add the outer ones from tmpl, which could be either a partial instantiation or gen_tmpl (in the case of a non-dependent call within a template definition). */ targ_ptr = (add_outermost_template_args (DECL_TI_ARGS (DECL_TEMPLATE_RESULT (tmpl)), targ_ptr)); /* It would be nice to avoid hashing here and then again in tsubst_decl, but it doesn't seem to be on the hot path. */ spec = retrieve_specialization (gen_tmpl, targ_ptr, 0); gcc_assert (tmpl == gen_tmpl || ((fndecl = retrieve_specialization (tmpl, orig_args, 0)) == spec) || fndecl == NULL_TREE); if (spec != NULL_TREE) { if (FNDECL_HAS_ACCESS_ERRORS (spec)) { if (complain & tf_error) recheck_decl_substitution (spec, gen_tmpl, targ_ptr); return error_mark_node; } return spec; } if (check_instantiated_args (gen_tmpl, INNERMOST_TEMPLATE_ARGS (targ_ptr), complain)) return error_mark_node; /* We are building a FUNCTION_DECL, during which the access of its parameters and return types have to be checked. However this FUNCTION_DECL which is the desired context for access checking is not built yet. We solve this chicken-and-egg problem by deferring all checks until we have the FUNCTION_DECL. */ push_deferring_access_checks (dk_deferred); /* Instantiation of the function happens in the context of the function template, not the context of the overload resolution we're doing. */ push_to_top_level (); /* If there are dependent arguments, e.g. because we're doing partial ordering, make sure processing_template_decl stays set. */ if (uses_template_parms (targ_ptr)) ++processing_template_decl; if (DECL_CLASS_SCOPE_P (gen_tmpl)) { tree ctx = tsubst_aggr_type (DECL_CONTEXT (gen_tmpl), targ_ptr, complain, gen_tmpl, true); push_nested_class (ctx); } tree pattern = DECL_TEMPLATE_RESULT (gen_tmpl); fndecl = NULL_TREE; if (VAR_P (pattern)) { /* We need to determine if we're using a partial or explicit specialization now, because the type of the variable could be different. */ tree tid = lookup_template_variable (gen_tmpl, targ_ptr); tree elt = most_specialized_partial_spec (tid, complain); if (elt == error_mark_node) pattern = error_mark_node; else if (elt) { tree partial_tmpl = TREE_VALUE (elt); tree partial_args = TREE_PURPOSE (elt); tree partial_pat = DECL_TEMPLATE_RESULT (partial_tmpl); fndecl = tsubst (partial_pat, partial_args, complain, gen_tmpl); } } /* Substitute template parameters to obtain the specialization. */ if (fndecl == NULL_TREE) fndecl = tsubst (pattern, targ_ptr, complain, gen_tmpl); if (DECL_CLASS_SCOPE_P (gen_tmpl)) pop_nested_class (); pop_from_top_level (); if (fndecl == error_mark_node) { pop_deferring_access_checks (); return error_mark_node; } /* The DECL_TI_TEMPLATE should always be the immediate parent template, not the most general template. */ DECL_TI_TEMPLATE (fndecl) = tmpl; DECL_TI_ARGS (fndecl) = targ_ptr; /* Now we know the specialization, compute access previously deferred. Do no access control for inheriting constructors, as we already checked access for the inherited constructor. 
*/ if (!(flag_new_inheriting_ctors && DECL_INHERITED_CTOR (fndecl))) { push_access_scope (fndecl); if (!perform_deferred_access_checks (complain)) access_ok = false; pop_access_scope (fndecl); } pop_deferring_access_checks (); /* If we've just instantiated the main entry point for a function, instantiate all the alternate entry points as well. We do this by cloning the instantiation of the main entry point, not by instantiating the template clones. */ if (tree chain = DECL_CHAIN (gen_tmpl)) if (DECL_P (chain) && DECL_CLONED_FUNCTION_P (chain)) clone_function_decl (fndecl, /*update_methods=*/false); if (!access_ok) { if (!(complain & tf_error)) { /* Remember to reinstantiate when we're out of SFINAE so the user can see the errors. */ FNDECL_HAS_ACCESS_ERRORS (fndecl) = true; } return error_mark_node; } return fndecl; } /* Wrapper for instantiate_template_1. */ tree instantiate_template (tree tmpl, tree orig_args, tsubst_flags_t complain) { tree ret; timevar_push (TV_TEMPLATE_INST); ret = instantiate_template_1 (tmpl, orig_args, complain); timevar_pop (TV_TEMPLATE_INST); return ret; } /* Instantiate the alias template TMPL with ARGS. Also push a template instantiation level, which instantiate_template doesn't do because functions and variables have sufficient context established by the callers. */ static tree instantiate_alias_template (tree tmpl, tree args, tsubst_flags_t complain) { if (tmpl == error_mark_node || args == error_mark_node) return error_mark_node; args = coerce_innermost_template_parms (DECL_TEMPLATE_PARMS (tmpl), args, tmpl, complain, /*require_all_args=*/true, /*use_default_args=*/true); /* FIXME check for satisfaction in check_instantiated_args. */ if (flag_concepts && !any_dependent_template_arguments_p (args) && !constraints_satisfied_p (tmpl, args)) { if (complain & tf_error) { auto_diagnostic_group d; error ("template constraint failure for %qD", tmpl); diagnose_constraints (input_location, tmpl, args); } return error_mark_node; } if (!push_tinst_level (tmpl, args)) return error_mark_node; tree r = instantiate_template (tmpl, args, complain); pop_tinst_level (); return r; } /* PARM is a template parameter pack for FN. Returns true iff PARM is used in a deducible way in the argument list of FN. */ static bool pack_deducible_p (tree parm, tree fn) { tree t = FUNCTION_FIRST_USER_PARMTYPE (fn); for (; t; t = TREE_CHAIN (t)) { tree type = TREE_VALUE (t); tree packs; if (!PACK_EXPANSION_P (type)) continue; for (packs = PACK_EXPANSION_PARAMETER_PACKS (type); packs; packs = TREE_CHAIN (packs)) if (template_args_equal (TREE_VALUE (packs), parm)) { /* The template parameter pack is used in a function parameter pack. If this is the end of the parameter list, the template parameter pack is deducible. */ if (TREE_CHAIN (t) == void_list_node) return true; else /* Otherwise, not. Well, it could be deduced from a non-pack parameter, but doing so would end up with a deduction mismatch, so don't bother. */ return false; } } /* The template parameter pack isn't used in any function parameter packs, but it might be used deeper, e.g. tuple<Args...>. */ return true; } /* Subroutine of fn_type_unification: check non-dependent parms for convertibility. */ static int check_non_deducible_conversions (tree parms, const tree *args, unsigned nargs, tree fn, unification_kind_t strict, int flags, struct conversion **convs, bool explain_p) { /* Non-constructor methods need to leave a conversion for 'this', which isn't included in nargs here. 
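   (E.g., for a hypothetical "struct A { template <typename T> void m (T, long); }"
   and a call "a.m (1, 2L)", slot 0 of CONVS is reserved for the object
   argument, so the conversion computed for the non-deducible "long"
   parameter lands in convs[2] even though it is args[1] here.)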
*/ unsigned offset = (DECL_NONSTATIC_MEMBER_FUNCTION_P (fn) && !DECL_CONSTRUCTOR_P (fn)); for (unsigned ia = 0; parms && parms != void_list_node && ia < nargs; ) { tree parm = TREE_VALUE (parms); if (TREE_CODE (parm) == TYPE_PACK_EXPANSION && (!TREE_CHAIN (parms) || TREE_CHAIN (parms) == void_list_node)) /* For a function parameter pack that occurs at the end of the parameter-declaration-list, the type A of each remaining argument of the call is compared with the type P of the declarator-id of the function parameter pack. */ break; parms = TREE_CHAIN (parms); if (TREE_CODE (parm) == TYPE_PACK_EXPANSION) /* For a function parameter pack that does not occur at the end of the parameter-declaration-list, the type of the parameter pack is a non-deduced context. */ continue; if (!uses_template_parms (parm)) { tree arg = args[ia]; conversion **conv_p = convs ? &convs[ia+offset] : NULL; int lflags = conv_flags (ia, nargs, fn, arg, flags); if (check_non_deducible_conversion (parm, arg, strict, lflags, conv_p, explain_p)) return 1; } ++ia; } return 0; } /* The FN is a TEMPLATE_DECL for a function. ARGS is an array with NARGS elements of the arguments that are being used when calling it. TARGS is a vector into which the deduced template arguments are placed. Returns either a FUNCTION_DECL for the matching specialization of FN or NULL_TREE if no suitable specialization can be found. If EXPLAIN_P is true, diagnostics will be printed to explain why it failed. If FN is a conversion operator, or we are trying to produce a specific specialization, RETURN_TYPE is the return type desired. The EXPLICIT_TARGS are explicit template arguments provided via a template-id. The parameter STRICT is one of: DEDUCE_CALL: We are deducing arguments for a function call, as in [temp.deduct.call]. If RETURN_TYPE is non-null, we are deducing arguments for a call to the result of a conversion function template, as in [over.call.object]. DEDUCE_CONV: We are deducing arguments for a conversion function, as in [temp.deduct.conv]. DEDUCE_EXACT: We are deducing arguments when doing an explicit instantiation as in [temp.explicit], when determining an explicit specialization as in [temp.expl.spec], or when taking the address of a function template, as in [temp.deduct.funcaddr]. */ tree fn_type_unification (tree fn, tree explicit_targs, tree targs, const tree *args, unsigned int nargs, tree return_type, unification_kind_t strict, int flags, struct conversion **convs, bool explain_p, bool decltype_p) { tree parms; tree fntype; tree decl = NULL_TREE; tsubst_flags_t complain = (explain_p ? tf_warning_or_error : tf_none); bool ok; static int deduction_depth; /* type_unification_real will pass back any access checks from default template argument substitution. */ vec<deferred_access_check, va_gc> *checks = NULL; /* We don't have all the template args yet. */ bool incomplete = true; tree orig_fn = fn; if (flag_new_inheriting_ctors) fn = strip_inheriting_ctors (fn); tree tparms = DECL_INNERMOST_TEMPLATE_PARMS (fn); tree r = error_mark_node; tree full_targs = targs; if (TMPL_ARGS_DEPTH (targs) < TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (fn))) full_targs = (add_outermost_template_args (DECL_TI_ARGS (DECL_TEMPLATE_RESULT (fn)), targs)); if (decltype_p) complain |= tf_decltype; /* In C++0x, it's possible to have a function template whose type depends on itself recursively. This is most obvious with decltype, but can also occur with enumeration scope (c++/48969). 
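   A sketch of the decltype case (hypothetical user code):

     struct S { };
     template <typename T> auto f (T t) -> decltype (f (t));

   Deducing for f (S ()) substitutes the return type, whose call to f is
   found again by ADL at the instantiation point, restarting the very same
   deduction.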
So we need to catch infinite recursion and reject the substitution at deduction time; this function will return error_mark_node for any repeated substitution. This also catches excessive recursion such as when f<N> depends on f<N-1> across all integers, and returns error_mark_node for all the substitutions back up to the initial one. This is, of course, not reentrant. */ if (excessive_deduction_depth) return error_mark_node; ++deduction_depth; gcc_assert (TREE_CODE (fn) == TEMPLATE_DECL); fntype = TREE_TYPE (fn); if (explicit_targs) { /* [temp.deduct] The specified template arguments must match the template parameters in kind (i.e., type, nontype, template), and there must not be more arguments than there are parameters; otherwise type deduction fails. Nontype arguments must match the types of the corresponding nontype template parameters, or must be convertible to the types of the corresponding nontype parameters as specified in _temp.arg.nontype_, otherwise type deduction fails. All references in the function type of the function template to the corresponding template parameters are replaced by the specified template argument values. If a substitution in a template parameter or in the function type of the function template results in an invalid type, type deduction fails. */ int i, len = TREE_VEC_LENGTH (tparms); location_t loc = input_location; incomplete = false; if (explicit_targs == error_mark_node) goto fail; if (TMPL_ARGS_DEPTH (explicit_targs) < TMPL_ARGS_DEPTH (full_targs)) explicit_targs = add_outermost_template_args (full_targs, explicit_targs); /* Adjust any explicit template arguments before entering the substitution context. */ explicit_targs = (coerce_template_parms (tparms, explicit_targs, NULL_TREE, complain|tf_partial, /*require_all_args=*/false, /*use_default_args=*/false)); if (explicit_targs == error_mark_node) goto fail; /* Substitute the explicit args into the function type. This is necessary so that, for instance, explicitly declared function arguments can match null pointed constants. If we were given an incomplete set of explicit args, we must not do semantic processing during substitution as we could create partial instantiations. */ for (i = 0; i < len; i++) { tree parm = TREE_VALUE (TREE_VEC_ELT (tparms, i)); bool parameter_pack = false; tree targ = TREE_VEC_ELT (explicit_targs, i); /* Dig out the actual parm. */ if (TREE_CODE (parm) == TYPE_DECL || TREE_CODE (parm) == TEMPLATE_DECL) { parm = TREE_TYPE (parm); parameter_pack = TEMPLATE_TYPE_PARAMETER_PACK (parm); } else if (TREE_CODE (parm) == PARM_DECL) { parm = DECL_INITIAL (parm); parameter_pack = TEMPLATE_PARM_PARAMETER_PACK (parm); } if (targ == NULL_TREE) /* No explicit argument for this template parameter. */ incomplete = true; else if (parameter_pack && pack_deducible_p (parm, fn)) { /* Mark the argument pack as "incomplete". We could still deduce more arguments during unification. We remove this mark in type_unification_real. */ ARGUMENT_PACK_INCOMPLETE_P(targ) = 1; ARGUMENT_PACK_EXPLICIT_ARGS (targ) = ARGUMENT_PACK_ARGS (targ); /* We have some incomplete argument packs. */ incomplete = true; } } if (incomplete) { if (!push_tinst_level (fn, explicit_targs)) { excessive_deduction_depth = true; goto fail; } ++processing_template_decl; input_location = DECL_SOURCE_LOCATION (fn); /* Ignore any access checks; we'll see them again in instantiate_template and they might have the wrong access path at this point. 
*/ push_deferring_access_checks (dk_deferred); tsubst_flags_t ecomplain = complain | tf_partial | tf_fndecl_type; fntype = tsubst (TREE_TYPE (fn), explicit_targs, ecomplain, NULL_TREE); pop_deferring_access_checks (); input_location = loc; --processing_template_decl; pop_tinst_level (); if (fntype == error_mark_node) goto fail; } /* Place the explicitly specified arguments in TARGS. */ explicit_targs = INNERMOST_TEMPLATE_ARGS (explicit_targs); for (i = NUM_TMPL_ARGS (explicit_targs); i--;) TREE_VEC_ELT (targs, i) = TREE_VEC_ELT (explicit_targs, i); if (!incomplete && CHECKING_P && !NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs)) SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs, NUM_TMPL_ARGS (explicit_targs)); } if (return_type && strict != DEDUCE_CALL) { tree *new_args = XALLOCAVEC (tree, nargs + 1); new_args[0] = return_type; memcpy (new_args + 1, args, nargs * sizeof (tree)); args = new_args; ++nargs; } if (!incomplete) goto deduced; /* Never do unification on the 'this' parameter. */ parms = skip_artificial_parms_for (fn, TYPE_ARG_TYPES (fntype)); if (return_type && strict == DEDUCE_CALL) { /* We're deducing for a call to the result of a template conversion function. The parms we really want are in return_type. */ if (INDIRECT_TYPE_P (return_type)) return_type = TREE_TYPE (return_type); parms = TYPE_ARG_TYPES (return_type); } else if (return_type) { parms = tree_cons (NULL_TREE, TREE_TYPE (fntype), parms); } /* We allow incomplete unification without an error message here because the standard doesn't seem to explicitly prohibit it. Our callers must be ready to deal with unification failures in any event. */ /* If we aren't explaining yet, push tinst context so we can see where any errors (e.g. from class instantiations triggered by instantiation of default template arguments) come from. If we are explaining, this context is redundant. */ if (!explain_p && !push_tinst_level (fn, targs)) { excessive_deduction_depth = true; goto fail; } ok = !type_unification_real (DECL_INNERMOST_TEMPLATE_PARMS (fn), full_targs, parms, args, nargs, /*subr=*/0, strict, &checks, explain_p); if (!explain_p) pop_tinst_level (); if (!ok) goto fail; /* Now that we have bindings for all of the template arguments, ensure that the arguments deduced for the template template parameters have compatible template parameter lists. We cannot check this property before we have deduced all template arguments, because the template parameter types of a template template parameter might depend on prior template parameters deduced after the template template parameter. The following ill-formed example illustrates this issue: template<typename T, template<T> class C> void f(C<5>, T); template<int N> struct X {}; void g() { f(X<5>(), 5l); // error: template argument deduction fails } The template parameter list of 'C' depends on the template type parameter 'T', but 'C' is deduced to 'X' before 'T' is deduced to 'long'. Thus, we can't check that 'C' cannot bind to 'X' at the time that we deduce 'C'. */ if (!template_template_parm_bindings_ok_p (DECL_INNERMOST_TEMPLATE_PARMS (fn), targs)) { unify_inconsistent_template_template_parameters (explain_p); goto fail; } /* DR 1391: All parameters have args, now check non-dependent parms for convertibility. */ if (check_non_deducible_conversions (parms, args, nargs, fn, strict, flags, convs, explain_p)) goto fail; deduced: /* All is well so far. 
Now, check: [temp.deduct] When all template arguments have been deduced, all uses of template parameters in nondeduced contexts are replaced with the corresponding deduced argument values. If the substitution results in an invalid type, as described above, type deduction fails. */ if (!push_tinst_level (fn, targs)) { excessive_deduction_depth = true; goto fail; } /* Also collect access checks from the instantiation. */ reopen_deferring_access_checks (checks); decl = instantiate_template (fn, targs, complain); checks = get_deferred_access_checks (); pop_deferring_access_checks (); pop_tinst_level (); if (decl == error_mark_node) goto fail; /* Now perform any access checks encountered during substitution. */ push_access_scope (decl); ok = perform_access_checks (checks, complain); pop_access_scope (decl); if (!ok) goto fail; /* If we're looking for an exact match, check that what we got is indeed an exact match. It might not be if some template parameters are used in non-deduced contexts. But don't check for an exact match if we have dependent template arguments; in that case we're doing partial ordering, and we already know that we have two candidates that will provide the actual type. */ if (strict == DEDUCE_EXACT && !any_dependent_template_arguments_p (targs)) { tree substed = TREE_TYPE (decl); unsigned int i; tree sarg = skip_artificial_parms_for (decl, TYPE_ARG_TYPES (substed)); if (return_type) sarg = tree_cons (NULL_TREE, TREE_TYPE (substed), sarg); for (i = 0; i < nargs && sarg; ++i, sarg = TREE_CHAIN (sarg)) if (!same_type_p (args[i], TREE_VALUE (sarg))) { unify_type_mismatch (explain_p, args[i], TREE_VALUE (sarg)); goto fail; } } /* After doing deduction with the inherited constructor, actually return an instantiation of the inheriting constructor. */ if (orig_fn != fn) decl = instantiate_template (orig_fn, targs, complain); r = decl; fail: --deduction_depth; if (excessive_deduction_depth) { if (deduction_depth == 0) /* Reset once we're all the way out. */ excessive_deduction_depth = false; } return r; } /* Adjust types before performing type deduction, as described in [temp.deduct.call] and [temp.deduct.conv]. The rules in these two sections are symmetric. PARM is the type of a function parameter or the return type of the conversion function. ARG is the type of the argument passed to the call, or the type of the value initialized with the result of the conversion function. ARG_EXPR is the original argument expression, which may be null. */ static int maybe_adjust_types_for_deduction (unification_kind_t strict, tree* parm, tree* arg, tree arg_expr) { int result = 0; switch (strict) { case DEDUCE_CALL: break; case DEDUCE_CONV: /* Swap PARM and ARG throughout the remainder of this function; the handling is precisely symmetric since PARM will initialize ARG rather than vice versa. */ std::swap (parm, arg); break; case DEDUCE_EXACT: /* Core issue #873: Do the DR606 thing (see below) for these cases, too, but here handle it by stripping the reference from PARM rather than by adding it to ARG. */ if (TYPE_REF_P (*parm) && TYPE_REF_IS_RVALUE (*parm) && TREE_CODE (TREE_TYPE (*parm)) == TEMPLATE_TYPE_PARM && cp_type_quals (TREE_TYPE (*parm)) == TYPE_UNQUALIFIED && TYPE_REF_P (*arg) && !TYPE_REF_IS_RVALUE (*arg)) *parm = TREE_TYPE (*parm); /* Nothing else to do in this case. 
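For illustration (an example added for exposition, not part of the original source; the names are arbitrary): given

         template <class T> void f(T&&);
         void (*p)(int&) = f;

     PARM is T&& and ARG is int&, so the reference is stripped from PARM and T is deduced as int&; after reference collapsing, f<int&> then has the required type void(int&).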
*/ return 0; default: gcc_unreachable (); } if (!TYPE_REF_P (*parm)) { /* [temp.deduct.call] If P is not a reference type: --If A is an array type, the pointer type produced by the array-to-pointer standard conversion (_conv.array_) is used in place of A for type deduction; otherwise, --If A is a function type, the pointer type produced by the function-to-pointer standard conversion (_conv.func_) is used in place of A for type deduction; otherwise, --If A is a cv-qualified type, the top level cv-qualifiers of A's type are ignored for type deduction. */ if (TREE_CODE (*arg) == ARRAY_TYPE) *arg = build_pointer_type (TREE_TYPE (*arg)); else if (TREE_CODE (*arg) == FUNCTION_TYPE) *arg = build_pointer_type (*arg); else *arg = TYPE_MAIN_VARIANT (*arg); } /* [14.8.2.1/3 temp.deduct.call], "A forwarding reference is an rvalue reference to a cv-unqualified template parameter that does not represent a template parameter of a class template (during class template argument deduction (13.3.1.8)). If P is a forwarding reference and the argument is an lvalue, the type "lvalue reference to A" is used in place of A for type deduction. */ if (TYPE_REF_P (*parm) && TYPE_REF_IS_RVALUE (*parm) && TREE_CODE (TREE_TYPE (*parm)) == TEMPLATE_TYPE_PARM && !TEMPLATE_TYPE_PARM_FOR_CLASS (TREE_TYPE (*parm)) && cp_type_quals (TREE_TYPE (*parm)) == TYPE_UNQUALIFIED && (arg_expr ? lvalue_p (arg_expr) /* try_one_overload doesn't provide an arg_expr, but functions are always lvalues. */ : TREE_CODE (*arg) == FUNCTION_TYPE)) *arg = build_reference_type (*arg); /* [temp.deduct.call] If P is a cv-qualified type, the top level cv-qualifiers of P's type are ignored for type deduction. If P is a reference type, the type referred to by P is used for type deduction. */ *parm = TYPE_MAIN_VARIANT (*parm); if (TYPE_REF_P (*parm)) { *parm = TREE_TYPE (*parm); result |= UNIFY_ALLOW_OUTER_MORE_CV_QUAL; } /* DR 322. For conversion deduction, remove a reference type on parm too (which has been swapped into ARG). */ if (strict == DEDUCE_CONV && TYPE_REF_P (*arg)) *arg = TREE_TYPE (*arg); return result; } /* Subroutine of fn_type_unification. PARM is a function parameter of a template which doesn't contain any deducible template parameters; check if ARG is a suitable match for it. STRICT, FLAGS and EXPLAIN_P are as in unify_one_argument. */ static int check_non_deducible_conversion (tree parm, tree arg, int strict, int flags, struct conversion **conv_p, bool explain_p) { tree type; if (!TYPE_P (arg)) type = TREE_TYPE (arg); else type = arg; if (same_type_p (parm, type)) return unify_success (explain_p); tsubst_flags_t complain = (explain_p ? tf_warning_or_error : tf_none); if (strict == DEDUCE_CONV) { if (can_convert_arg (type, parm, NULL_TREE, flags, complain)) return unify_success (explain_p); } else if (strict != DEDUCE_EXACT) { bool ok = false; tree conv_arg = TYPE_P (arg) ? NULL_TREE : arg; if (conv_p) /* Avoid recalculating this in add_function_candidate. */ ok = (*conv_p = good_conversion (parm, type, conv_arg, flags, complain)); else ok = can_convert_arg (parm, type, conv_arg, flags, complain); if (ok) return unify_success (explain_p); } if (strict == DEDUCE_EXACT) return unify_type_mismatch (explain_p, parm, arg); else return unify_arg_conversion (explain_p, parm, type, arg); } static bool uses_deducible_template_parms (tree type); /* Returns true iff the expression EXPR is one from which a template argument can be deduced. In other words, if it's an undecorated use of a template non-type parameter. 
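For example (an illustration added for exposition, not part of the original source):

         template <int N> void f(int (&)[N]);      the bound is the bare parameter N: deducible
         template <int N> void g(int (&)[N + 1]);  the bound is a compound expression: a non-deduced context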
*/ static bool deducible_expression (tree expr) { /* Strip implicit conversions. */ while (CONVERT_EXPR_P (expr) || TREE_CODE (expr) == VIEW_CONVERT_EXPR) expr = TREE_OPERAND (expr, 0); return (TREE_CODE (expr) == TEMPLATE_PARM_INDEX); } /* Returns true iff the array domain DOMAIN uses a template parameter in a deducible way; that is, if it has a max value of <PARM> - 1. */ static bool deducible_array_bound (tree domain) { if (domain == NULL_TREE) return false; tree max = TYPE_MAX_VALUE (domain); if (TREE_CODE (max) != MINUS_EXPR) return false; return deducible_expression (TREE_OPERAND (max, 0)); } /* Returns true iff the template arguments ARGS use a template parameter in a deducible way. */ static bool deducible_template_args (tree args) { for (int i = 0; i < TREE_VEC_LENGTH (args); ++i) { bool deducible; tree elt = TREE_VEC_ELT (args, i); if (ARGUMENT_PACK_P (elt)) deducible = deducible_template_args (ARGUMENT_PACK_ARGS (elt)); else { if (PACK_EXPANSION_P (elt)) elt = PACK_EXPANSION_PATTERN (elt); if (TREE_CODE (elt) == TEMPLATE_TEMPLATE_PARM) deducible = true; else if (TYPE_P (elt)) deducible = uses_deducible_template_parms (elt); else deducible = deducible_expression (elt); } if (deducible) return true; } return false; } /* Returns true iff TYPE contains any deducible references to template parameters, as per 14.8.2.5. */ static bool uses_deducible_template_parms (tree type) { if (PACK_EXPANSION_P (type)) type = PACK_EXPANSION_PATTERN (type); /* T cv-list T TT<T> TT<i> TT<> */ if (TREE_CODE (type) == TEMPLATE_TYPE_PARM || TREE_CODE (type) == BOUND_TEMPLATE_TEMPLATE_PARM) return true; /* T* T& T&& */ if (INDIRECT_TYPE_P (type)) return uses_deducible_template_parms (TREE_TYPE (type)); /* T[integer-constant ] type [i] */ if (TREE_CODE (type) == ARRAY_TYPE) return (uses_deducible_template_parms (TREE_TYPE (type)) || deducible_array_bound (TYPE_DOMAIN (type))); /* T type ::* type T::* T T::* T (type ::*)() type (T::*)() type (type ::*)(T) type (T::*)(T) T (type ::*)(T) T (T::*)() T (T::*)(T) */ if (TYPE_PTRMEM_P (type)) return (uses_deducible_template_parms (TYPE_PTRMEM_CLASS_TYPE (type)) || (uses_deducible_template_parms (TYPE_PTRMEM_POINTED_TO_TYPE (type)))); /* template-name <T> (where template-name refers to a class template) template-name <i> (where template-name refers to a class template) */ if (CLASS_TYPE_P (type) && CLASSTYPE_TEMPLATE_INFO (type) && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (type))) return deducible_template_args (INNERMOST_TEMPLATE_ARGS (CLASSTYPE_TI_ARGS (type))); /* type (T) T() T(T) */ if (FUNC_OR_METHOD_TYPE_P (type)) { if (uses_deducible_template_parms (TREE_TYPE (type))) return true; tree parm = TYPE_ARG_TYPES (type); if (TREE_CODE (type) == METHOD_TYPE) parm = TREE_CHAIN (parm); for (; parm; parm = TREE_CHAIN (parm)) if (uses_deducible_template_parms (TREE_VALUE (parm))) return true; } return false; } /* Subroutine of type_unification_real and unify_pack_expansion to handle unification of a single P/A pair. Parameters are as for those functions. */ static int unify_one_argument (tree tparms, tree targs, tree parm, tree arg, int subr, unification_kind_t strict, bool explain_p) { tree arg_expr = NULL_TREE; int arg_strict; if (arg == error_mark_node || parm == error_mark_node) return unify_invalid (explain_p); if (arg == unknown_type_node) /* We can't deduce anything from this, but we might get all the template args from other function args. 
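For instance (an illustration added for exposition, not part of the original source):

         template <class T> void f(T, void (*)(T));
         void g(int);
         void g(double);
         f(1, g);

     Nothing is deduced from `g', which has no single type, but T is deduced as int from the first argument, and overload resolution then selects g(int) for the second.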
*/ return unify_success (explain_p); /* Implicit conversions (Clause 4) will be performed on a function argument to convert it to the type of the corresponding function parameter if the parameter type contains no template-parameters that participate in template argument deduction. */ if (strict != DEDUCE_EXACT && TYPE_P (parm) && !uses_deducible_template_parms (parm)) /* For function parameters with no deducible template parameters, just return. We'll check non-dependent conversions later. */ return unify_success (explain_p); switch (strict) { case DEDUCE_CALL: arg_strict = (UNIFY_ALLOW_OUTER_LEVEL | UNIFY_ALLOW_MORE_CV_QUAL | UNIFY_ALLOW_DERIVED); break; case DEDUCE_CONV: arg_strict = UNIFY_ALLOW_LESS_CV_QUAL; break; case DEDUCE_EXACT: arg_strict = UNIFY_ALLOW_NONE; break; default: gcc_unreachable (); } /* We only do these transformations if this is the top-level parameter_type_list in a call or declaration matching; in other situations (nested function declarators, template argument lists) we won't be comparing a type to an expression, and we don't do any type adjustments. */ if (!subr) { if (!TYPE_P (arg)) { gcc_assert (TREE_TYPE (arg) != NULL_TREE); if (type_unknown_p (arg)) { /* [temp.deduct.type] A template-argument can be deduced from a pointer to function or pointer to member function argument if the set of overloaded functions does not contain function templates and at most one of a set of overloaded functions provides a unique match. */ resolve_overloaded_unification (tparms, targs, parm, arg, strict, arg_strict, explain_p); /* If a unique match was not found, this is a non-deduced context, so we still succeed. */ return unify_success (explain_p); } arg_expr = arg; arg = unlowered_expr_type (arg); if (arg == error_mark_node) return unify_invalid (explain_p); } arg_strict |= maybe_adjust_types_for_deduction (strict, &parm, &arg, arg_expr); } else if ((TYPE_P (parm) || TREE_CODE (parm) == TEMPLATE_DECL) != (TYPE_P (arg) || TREE_CODE (arg) == TEMPLATE_DECL)) return unify_template_argument_mismatch (explain_p, parm, arg); /* For deduction from an init-list we need the actual list. */ if (arg_expr && BRACE_ENCLOSED_INITIALIZER_P (arg_expr)) arg = arg_expr; return unify (tparms, targs, parm, arg, arg_strict, explain_p); } /* for_each_template_parm callback that always returns 0. */ static int zero_r (tree, void *) { return 0; } /* for_each_template_parm any_fn callback to handle deduction of a template type argument from the type of an array bound. */ static int array_deduction_r (tree t, void *data) { tree_pair_p d = (tree_pair_p)data; tree &tparms = d->purpose; tree &targs = d->value; if (TREE_CODE (t) == ARRAY_TYPE) if (tree dom = TYPE_DOMAIN (t)) if (tree max = TYPE_MAX_VALUE (dom)) { if (TREE_CODE (max) == MINUS_EXPR) max = TREE_OPERAND (max, 0); if (TREE_CODE (max) == TEMPLATE_PARM_INDEX) unify (tparms, targs, TREE_TYPE (max), size_type_node, UNIFY_ALLOW_NONE, /*explain*/false); } /* Keep walking. */ return 0; } /* Try to deduce any not-yet-deduced template type arguments from the type of an array bound. This is handled separately from unify because 14.8.2.5 says "The type of a type parameter is only deduced from an array bound if it is not otherwise deduced." */ static void try_array_deduction (tree tparms, tree targs, tree parm) { tree_pair_s data = { tparms, targs }; hash_set<tree> visited; for_each_template_parm (parm, zero_r, &data, &visited, /*nondeduced*/false, array_deduction_r); } /* Most parms like fn_type_unification. 
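(An illustrative note on try_array_deduction above, added for exposition and not part of the original source: given

         template <class T, T N> void f(int (&)[N]);
         int a[3];
         f(a);

     N is deduced from the array bound, and T, which is not deduced anywhere else, is deduced from that bound as std::size_t.)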
If SUBR is 1, we're being called recursively (to unify the arguments of a function or method parameter of a function template). CHECKS is a pointer to a vector of access checks encountered while substituting default template arguments. */ static int type_unification_real (tree tparms, tree full_targs, tree xparms, const tree *xargs, unsigned int xnargs, int subr, unification_kind_t strict, vec<deferred_access_check, va_gc> **checks, bool explain_p) { tree parm, arg; int i; int ntparms = TREE_VEC_LENGTH (tparms); int saw_undeduced = 0; tree parms; const tree *args; unsigned int nargs; unsigned int ia; gcc_assert (TREE_CODE (tparms) == TREE_VEC); gcc_assert (xparms == NULL_TREE || TREE_CODE (xparms) == TREE_LIST); gcc_assert (ntparms > 0); tree targs = INNERMOST_TEMPLATE_ARGS (full_targs); /* Reset the number of non-defaulted template arguments contained in TARGS. */ NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs) = NULL_TREE; again: parms = xparms; args = xargs; nargs = xnargs; ia = 0; while (parms && parms != void_list_node && ia < nargs) { parm = TREE_VALUE (parms); if (TREE_CODE (parm) == TYPE_PACK_EXPANSION && (!TREE_CHAIN (parms) || TREE_CHAIN (parms) == void_list_node)) /* For a function parameter pack that occurs at the end of the parameter-declaration-list, the type A of each remaining argument of the call is compared with the type P of the declarator-id of the function parameter pack. */ break; parms = TREE_CHAIN (parms); if (TREE_CODE (parm) == TYPE_PACK_EXPANSION) /* For a function parameter pack that does not occur at the end of the parameter-declaration-list, the type of the parameter pack is a non-deduced context. */ continue; arg = args[ia]; ++ia; if (unify_one_argument (tparms, full_targs, parm, arg, subr, strict, explain_p)) return 1; } if (parms && parms != void_list_node && TREE_CODE (TREE_VALUE (parms)) == TYPE_PACK_EXPANSION) { /* Unify the remaining arguments with the pack expansion type. */ tree argvec; tree parmvec = make_tree_vec (1); /* Allocate a TREE_VEC and copy in all of the arguments */ argvec = make_tree_vec (nargs - ia); for (i = 0; ia < nargs; ++ia, ++i) TREE_VEC_ELT (argvec, i) = args[ia]; /* Copy the parameter into parmvec. */ TREE_VEC_ELT (parmvec, 0) = TREE_VALUE (parms); if (unify_pack_expansion (tparms, full_targs, parmvec, argvec, strict, /*subr=*/subr, explain_p)) return 1; /* Advance to the end of the list of parameters. */ parms = TREE_CHAIN (parms); } /* Fail if we've reached the end of the parm list, and more args are present, and the parm list isn't variadic. */ if (ia < nargs && parms == void_list_node) return unify_too_many_arguments (explain_p, nargs, ia); /* Fail if parms are left and they don't have default values and they aren't all deduced as empty packs (c++/57397). This is consistent with sufficient_parms_p. */ if (parms && parms != void_list_node && TREE_PURPOSE (parms) == NULL_TREE) { unsigned int count = nargs; tree p = parms; bool type_pack_p; do { type_pack_p = TREE_CODE (TREE_VALUE (p)) == TYPE_PACK_EXPANSION; if (!type_pack_p) count++; p = TREE_CHAIN (p); } while (p && p != void_list_node); if (count != nargs) return unify_too_few_arguments (explain_p, ia, count, type_pack_p); } if (!subr) { tsubst_flags_t complain = (explain_p ? tf_warning_or_error : tf_none); bool tried_array_deduction = (cxx_dialect < cxx17); for (i = 0; i < ntparms; i++) { tree targ = TREE_VEC_ELT (targs, i); tree tparm = TREE_VEC_ELT (tparms, i); /* Clear the "incomplete" flags on all argument packs now so that substituting them into later default arguments works. 
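For example (an illustration added for exposition, not part of the original source), for

         template <class... Ts> void f(Ts...);
         f<int>(1, 2.0);

     the pack for Ts enters deduction marked incomplete, holding only the explicitly given {int}, and is completed to {int, double} by deduction from the call arguments.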
*/ if (targ && ARGUMENT_PACK_P (targ)) { ARGUMENT_PACK_INCOMPLETE_P (targ) = 0; ARGUMENT_PACK_EXPLICIT_ARGS (targ) = NULL_TREE; } if (targ || tparm == error_mark_node) continue; tparm = TREE_VALUE (tparm); if (TREE_CODE (tparm) == TYPE_DECL && !tried_array_deduction) { try_array_deduction (tparms, targs, xparms); tried_array_deduction = true; if (TREE_VEC_ELT (targs, i)) continue; } /* If this is an undeduced nontype parameter that depends on a type parameter, try another pass; its type may have been deduced from a later argument than the one from which this parameter can be deduced. */ if (TREE_CODE (tparm) == PARM_DECL && uses_template_parms (TREE_TYPE (tparm)) && saw_undeduced < 2) { saw_undeduced = 1; continue; } /* Core issue #226 (C++0x) [temp.deduct]: If a template argument has not been deduced, its default template argument, if any, is used. When we are in C++98 mode, TREE_PURPOSE will either be NULL_TREE or ERROR_MARK_NODE, so we do not need to explicitly check cxx_dialect here. */ if (TREE_PURPOSE (TREE_VEC_ELT (tparms, i))) /* OK, there is a default argument. Wait until after the conversion check to do substitution. */ continue; /* If the type parameter is a parameter pack, then it will be deduced to an empty parameter pack. */ if (template_parameter_pack_p (tparm)) { tree arg; if (TREE_CODE (tparm) == TEMPLATE_PARM_INDEX) { arg = make_node (NONTYPE_ARGUMENT_PACK); TREE_CONSTANT (arg) = 1; } else arg = cxx_make_type (TYPE_ARGUMENT_PACK); SET_ARGUMENT_PACK_ARGS (arg, make_tree_vec (0)); TREE_VEC_ELT (targs, i) = arg; continue; } return unify_parameter_deduction_failure (explain_p, tparm); } /* Now substitute into the default template arguments. */ for (i = 0; i < ntparms; i++) { tree targ = TREE_VEC_ELT (targs, i); tree tparm = TREE_VEC_ELT (tparms, i); if (targ || tparm == error_mark_node) continue; tree parm = TREE_VALUE (tparm); tree arg = TREE_PURPOSE (tparm); reopen_deferring_access_checks (*checks); location_t save_loc = input_location; if (DECL_P (parm)) input_location = DECL_SOURCE_LOCATION (parm); if (saw_undeduced == 1 && TREE_CODE (parm) == PARM_DECL && uses_template_parms (TREE_TYPE (parm))) { /* The type of this non-type parameter depends on undeduced parameters. Don't try to use its default argument yet, since we might deduce an argument for it on the next pass, but do check whether the arguments we already have cause substitution failure, so that that happens before we try later default arguments (78489). */ ++processing_template_decl; tree type = tsubst (TREE_TYPE (parm), full_targs, complain, NULL_TREE); --processing_template_decl; if (type == error_mark_node) arg = error_mark_node; else arg = NULL_TREE; } else { /* Even if the call is happening in template context, getting here means it's non-dependent, and a default argument is considered a separate definition under [temp.decls], so we can do this substitution without processing_template_decl. This is important if the default argument contains something that might be instantiation-dependent like access (87480). */ processing_template_decl_sentinel s; tree substed = NULL_TREE; if (saw_undeduced == 1) { /* First instatiate in template context, in case we still depend on undeduced template parameters. */ ++processing_template_decl; substed = tsubst_template_arg (arg, full_targs, complain, NULL_TREE); --processing_template_decl; if (substed != error_mark_node && !uses_template_parms (substed)) /* We replaced all the tparms, substitute again out of template context. 
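For instance (an illustration added for exposition, not part of the original source):

         template <class T, class U = T*> void f(T);
         f(5);

     T is deduced as int from the call; U cannot be deduced, so its default argument T* is substituted in this loop, giving U = int*.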
*/ substed = NULL_TREE; } if (!substed) substed = tsubst_template_arg (arg, full_targs, complain, NULL_TREE); if (!uses_template_parms (substed)) arg = convert_template_argument (parm, substed, full_targs, complain, i, NULL_TREE); else if (saw_undeduced == 1) arg = NULL_TREE; else arg = error_mark_node; } input_location = save_loc; *checks = get_deferred_access_checks (); pop_deferring_access_checks (); if (arg == error_mark_node) return 1; else if (arg) { TREE_VEC_ELT (targs, i) = arg; /* The position of the first default template argument, is also the number of non-defaulted arguments in TARGS. Record that. */ if (!NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs)) SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs, i); } } if (saw_undeduced++ == 1) goto again; } if (CHECKING_P && !NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs)) SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs, TREE_VEC_LENGTH (targs)); return unify_success (explain_p); } /* Subroutine of type_unification_real. Args are like the variables at the call site. ARG is an overloaded function (or template-id); we try deducing template args from each of the overloads, and if only one succeeds, we go with that. Modifies TARGS and returns true on success. */ static bool resolve_overloaded_unification (tree tparms, tree targs, tree parm, tree arg, unification_kind_t strict, int sub_strict, bool explain_p) { tree tempargs = copy_node (targs); int good = 0; tree goodfn = NULL_TREE; bool addr_p; if (TREE_CODE (arg) == ADDR_EXPR) { arg = TREE_OPERAND (arg, 0); addr_p = true; } else addr_p = false; if (TREE_CODE (arg) == COMPONENT_REF) /* Handle `&x' where `x' is some static or non-static member function name. */ arg = TREE_OPERAND (arg, 1); if (TREE_CODE (arg) == OFFSET_REF) arg = TREE_OPERAND (arg, 1); /* Strip baselink information. */ if (BASELINK_P (arg)) arg = BASELINK_FUNCTIONS (arg); if (TREE_CODE (arg) == TEMPLATE_ID_EXPR) { /* If we got some explicit template args, we need to plug them into the affected templates before we try to unify, in case the explicit args will completely resolve the templates in question. */ int ok = 0; tree expl_subargs = TREE_OPERAND (arg, 1); arg = TREE_OPERAND (arg, 0); for (lkp_iterator iter (arg); iter; ++iter) { tree fn = *iter; tree subargs, elem; if (TREE_CODE (fn) != TEMPLATE_DECL) continue; subargs = coerce_template_parms (DECL_INNERMOST_TEMPLATE_PARMS (fn), expl_subargs, NULL_TREE, tf_none, /*require_all_args=*/true, /*use_default_args=*/true); if (subargs != error_mark_node && !any_dependent_template_arguments_p (subargs)) { fn = instantiate_template (fn, subargs, tf_none); if (!constraints_satisfied_p (fn)) continue; if (undeduced_auto_decl (fn)) { /* Instantiate the function to deduce its return type. */ ++function_depth; instantiate_decl (fn, /*defer*/false, /*class*/false); --function_depth; } if (flag_noexcept_type) maybe_instantiate_noexcept (fn, tf_none); elem = TREE_TYPE (fn); if (try_one_overload (tparms, targs, tempargs, parm, elem, strict, sub_strict, addr_p, explain_p) && (!goodfn || !same_type_p (goodfn, elem))) { goodfn = elem; ++good; } } else if (subargs) ++ok; } /* If no templates (or more than one) are fully resolved by the explicit arguments, this template-id is a non-deduced context; it could still be OK if we deduce all template arguments for the enclosing call through other arguments. */ if (good != 1) good = ok; } else if (!OVL_P (arg)) /* If ARG is, for example, "(0, &f)" then its type will be unknown -- but the deduction does not succeed because the expression is not just the function on its own. 
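For contrast, a case that does succeed (an illustration added for exposition, not part of the original source):

         template <class T> void h(T (*)(T));
         int g(int);
         int g(int, int);
         h(g);

     Only the one-argument g can match T (*)(T), so that unique match deduces T = int.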
*/ return false; else for (lkp_iterator iter (arg); iter; ++iter) { tree fn = *iter; if (try_one_overload (tparms, targs, tempargs, parm, TREE_TYPE (fn), strict, sub_strict, addr_p, explain_p) && (!goodfn || !decls_match (goodfn, fn))) { goodfn = fn; ++good; } } /* [temp.deduct.type] A template-argument can be deduced from a pointer to function or pointer to member function argument if the set of overloaded functions does not contain function templates and at most one of a set of overloaded functions provides a unique match. So if we found multiple possibilities, we return success but don't deduce anything. */ if (good == 1) { int i = TREE_VEC_LENGTH (targs); for (; i--; ) if (TREE_VEC_ELT (tempargs, i)) { tree old = TREE_VEC_ELT (targs, i); tree new_ = TREE_VEC_ELT (tempargs, i); if (new_ && old && ARGUMENT_PACK_P (old) && ARGUMENT_PACK_EXPLICIT_ARGS (old)) /* Don't forget explicit template arguments in a pack. */ ARGUMENT_PACK_EXPLICIT_ARGS (new_) = ARGUMENT_PACK_EXPLICIT_ARGS (old); TREE_VEC_ELT (targs, i) = new_; } } if (good) return true; return false; } /* Core DR 115: In contexts where deduction is done and fails, or in contexts where deduction is not done, if a template argument list is specified and it, along with any default template arguments, identifies a single function template specialization, then the template-id is an lvalue for the function template specialization. */ tree resolve_nondeduced_context (tree orig_expr, tsubst_flags_t complain) { tree expr, offset, baselink; bool addr; if (!type_unknown_p (orig_expr)) return orig_expr; expr = orig_expr; addr = false; offset = NULL_TREE; baselink = NULL_TREE; if (TREE_CODE (expr) == ADDR_EXPR) { expr = TREE_OPERAND (expr, 0); addr = true; } if (TREE_CODE (expr) == OFFSET_REF) { offset = expr; expr = TREE_OPERAND (expr, 1); } if (BASELINK_P (expr)) { baselink = expr; expr = BASELINK_FUNCTIONS (expr); } if (TREE_CODE (expr) == TEMPLATE_ID_EXPR) { int good = 0; tree goodfn = NULL_TREE; /* If we got some explicit template args, we need to plug them into the affected templates before we try to unify, in case the explicit args will completely resolve the templates in question. */ tree expl_subargs = TREE_OPERAND (expr, 1); tree arg = TREE_OPERAND (expr, 0); tree badfn = NULL_TREE; tree badargs = NULL_TREE; for (lkp_iterator iter (arg); iter; ++iter) { tree fn = *iter; tree subargs, elem; if (TREE_CODE (fn) != TEMPLATE_DECL) continue; subargs = coerce_template_parms (DECL_INNERMOST_TEMPLATE_PARMS (fn), expl_subargs, NULL_TREE, tf_none, /*require_all_args=*/true, /*use_default_args=*/true); if (subargs != error_mark_node && !any_dependent_template_arguments_p (subargs)) { elem = instantiate_template (fn, subargs, tf_none); if (elem == error_mark_node) { badfn = fn; badargs = subargs; } else if (elem && (!goodfn || !decls_match (goodfn, elem)) && constraints_satisfied_p (elem)) { goodfn = elem; ++good; } } } if (good == 1) { mark_used (goodfn); expr = goodfn; if (baselink) expr = build_baselink (BASELINK_BINFO (baselink), BASELINK_ACCESS_BINFO (baselink), expr, BASELINK_OPTYPE (baselink)); if (offset) { tree base = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (offset, 0))); expr = build_offset_ref (base, expr, addr, complain); } if (addr) expr = cp_build_addr_expr (expr, complain); return expr; } else if (good == 0 && badargs && (complain & tf_error)) /* There were no good options and at least one bad one, so let the user know what the problem is. 
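(An illustrative example for this function, added for exposition and not part of the original source:

         template <class T> void g(T);
         auto p = &g<int>;

     No deduction is done in the initializer, but the explicit argument list identifies the single specialization g<int>, so the template-id resolves to it.)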
*/ instantiate_template (badfn, badargs, complain); } return orig_expr; } /* As above, but error out if the expression remains overloaded. */ tree resolve_nondeduced_context_or_error (tree exp, tsubst_flags_t complain) { exp = resolve_nondeduced_context (exp, complain); if (type_unknown_p (exp)) { if (complain & tf_error) cxx_incomplete_type_error (exp, TREE_TYPE (exp)); return error_mark_node; } return exp; } /* Subroutine of resolve_overloaded_unification; does deduction for a single overload. Fills TARGS with any deduced arguments, or error_mark_node if different overloads deduce different arguments for a given parm. ADDR_P is true if the expression for which deduction is being performed was of the form "& fn" rather than simply "fn". Returns 1 on success. */ static int try_one_overload (tree tparms, tree orig_targs, tree targs, tree parm, tree arg, unification_kind_t strict, int sub_strict, bool addr_p, bool explain_p) { int nargs; tree tempargs; int i; if (arg == error_mark_node) return 0; /* [temp.deduct.type] A template-argument can be deduced from a pointer to function or pointer to member function argument if the set of overloaded functions does not contain function templates and at most one of a set of overloaded functions provides a unique match. So if this is a template, just return success. */ if (uses_template_parms (arg)) return 1; if (TREE_CODE (arg) == METHOD_TYPE) arg = build_ptrmemfunc_type (build_pointer_type (arg)); else if (addr_p) arg = build_pointer_type (arg); sub_strict |= maybe_adjust_types_for_deduction (strict, &parm, &arg, NULL); /* We don't copy orig_targs for this because if we have already deduced some template args from previous args, unify would complain when we try to deduce a template parameter for the same argument, even though there isn't really a conflict. */ nargs = TREE_VEC_LENGTH (targs); tempargs = make_tree_vec (nargs); if (unify (tparms, tempargs, parm, arg, sub_strict, explain_p)) return 0; /* First make sure we didn't deduce anything that conflicts with explicitly specified args. */ for (i = nargs; i--; ) { tree elt = TREE_VEC_ELT (tempargs, i); tree oldelt = TREE_VEC_ELT (orig_targs, i); if (!elt) /*NOP*/; else if (uses_template_parms (elt)) /* Since we're unifying against ourselves, we will fill in template args used in the function parm list with our own template parms. Discard them. */ TREE_VEC_ELT (tempargs, i) = NULL_TREE; else if (oldelt && ARGUMENT_PACK_P (oldelt)) { /* Check that the argument at each index of the deduced argument pack is equivalent to the corresponding explicitly specified argument. We may have deduced more arguments than were explicitly specified, and that's OK. */ /* We used to assert ARGUMENT_PACK_INCOMPLETE_P (oldelt) here, but that's wrong if we deduce the same argument pack from multiple function arguments: it's only incomplete the first time. */ tree explicit_pack = ARGUMENT_PACK_ARGS (oldelt); tree deduced_pack = ARGUMENT_PACK_ARGS (elt); if (TREE_VEC_LENGTH (deduced_pack) < TREE_VEC_LENGTH (explicit_pack)) return 0; for (int j = 0; j < TREE_VEC_LENGTH (explicit_pack); j++) if (!template_args_equal (TREE_VEC_ELT (explicit_pack, j), TREE_VEC_ELT (deduced_pack, j))) return 0; } else if (oldelt && !template_args_equal (oldelt, elt)) return 0; } for (i = nargs; i--; ) { tree elt = TREE_VEC_ELT (tempargs, i); if (elt) TREE_VEC_ELT (targs, i) = elt; } return 1; } /* PARM is a template class (perhaps with unbound template parameters). ARG is a fully instantiated type. 
If ARG can be bound to PARM, return ARG, otherwise return NULL_TREE. TPARMS and TARGS are as for unify. */ static tree try_class_unification (tree tparms, tree targs, tree parm, tree arg, bool explain_p) { tree copy_of_targs; if (!CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P (arg)) return NULL_TREE; else if (TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM) /* Matches anything. */; else if (most_general_template (CLASSTYPE_TI_TEMPLATE (arg)) != most_general_template (CLASSTYPE_TI_TEMPLATE (parm))) return NULL_TREE; /* We need to make a new template argument vector for the call to unify. If we used TARGS, we'd clutter it up with the result of the attempted unification, even if this class didn't work out. We also don't want to commit ourselves to all the unifications we've already done, since unification is supposed to be done on an argument-by-argument basis. In other words, consider the following pathological case: template <int I, int J, int K> struct S {}; template <int I, int J> struct S<I, J, 2> : public S<I, I, I>, S<J, J, J> {}; template <int I, int J, int K> void f(S<I, J, K>, S<I, I, I>); void g() { S<0, 0, 0> s0; S<0, 1, 2> s2; f(s0, s2); } Now, by the time we consider the unification involving `s2', we already know that we must have `f<0, 0, 0>'. But, even though `S<0, 1, 2>' is derived from `S<0, 0, 0>', the code is invalid because there are two ways to unify base classes of S<0, 1, 2> with S<I, I, I>. If we kept the already deduced knowledge, we would reject the possibility I=1. */ copy_of_targs = make_tree_vec (TREE_VEC_LENGTH (targs)); if (TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM) { if (unify_bound_ttp_args (tparms, copy_of_targs, parm, arg, explain_p)) return NULL_TREE; return arg; } /* If unification failed, we're done. */ if (unify (tparms, copy_of_targs, CLASSTYPE_TI_ARGS (parm), CLASSTYPE_TI_ARGS (arg), UNIFY_ALLOW_NONE, explain_p)) return NULL_TREE; return arg; } /* Given a template type PARM and a class type ARG, find the unique base type in ARG that is an instance of PARM. We do not examine ARG itself; only its base-classes. If there is not exactly one appropriate base class, return NULL_TREE. PARM may be the type of a partial specialization, as well as a plain template type. Used by unify. */ static enum template_base_result get_template_base (tree tparms, tree targs, tree parm, tree arg, bool explain_p, tree *result) { tree rval = NULL_TREE; tree binfo; gcc_assert (RECORD_OR_UNION_CODE_P (TREE_CODE (arg))); binfo = TYPE_BINFO (complete_type (arg)); if (!binfo) { /* The type could not be completed. */ *result = NULL_TREE; return tbr_incomplete_type; } /* Walk in inheritance graph order. The search order is not important, and this avoids multiple walks of virtual bases. */ for (binfo = TREE_CHAIN (binfo); binfo; binfo = TREE_CHAIN (binfo)) { tree r = try_class_unification (tparms, targs, parm, BINFO_TYPE (binfo), explain_p); if (r) { /* If there is more than one satisfactory baseclass, then: [temp.deduct.call] If they yield more than one possible deduced A, the type deduction fails. applies. */ if (rval && !same_type_p (r, rval)) { *result = NULL_TREE; return tbr_ambiguous_baseclass; } rval = r; } } *result = rval; return tbr_success; } /* Returns the level of DECL, which declares a template parameter. 
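For example (an illustration added for exposition, not part of the original source):

         template <class T>          T is at level 1
         struct A
         {
           template <class U>        U is at level 2
           void f (U);
         };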
*/ static int template_decl_level (tree decl) { switch (TREE_CODE (decl)) { case TYPE_DECL: case TEMPLATE_DECL: return TEMPLATE_TYPE_LEVEL (TREE_TYPE (decl)); case PARM_DECL: return TEMPLATE_PARM_LEVEL (DECL_INITIAL (decl)); default: gcc_unreachable (); } return 0; } /* Decide whether ARG can be unified with PARM, considering only the cv-qualifiers of each type, given STRICT as documented for unify. Returns nonzero iff the unification is OK on that basis. */ static int check_cv_quals_for_unify (int strict, tree arg, tree parm) { int arg_quals = cp_type_quals (arg); int parm_quals = cp_type_quals (parm); if (TREE_CODE (parm) == TEMPLATE_TYPE_PARM && !(strict & UNIFY_ALLOW_OUTER_MORE_CV_QUAL)) { /* Although a CVR qualifier is ignored when being applied to a substituted template parameter ([8.3.2]/1 for example), that does not allow us to unify "const T" with "int&" because both types are not of the form "cv-list T" [14.8.2.5 temp.deduct.type]. It is ok when we're allowing additional CV qualifiers at the outer level [14.8.2.1]/3,1st bullet. */ if ((TYPE_REF_P (arg) || FUNC_OR_METHOD_TYPE_P (arg)) && (parm_quals & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE))) return 0; if ((!INDIRECT_TYPE_P (arg) && TREE_CODE (arg) != TEMPLATE_TYPE_PARM) && (parm_quals & TYPE_QUAL_RESTRICT)) return 0; } if (!(strict & (UNIFY_ALLOW_MORE_CV_QUAL | UNIFY_ALLOW_OUTER_MORE_CV_QUAL)) && (arg_quals & parm_quals) != parm_quals) return 0; if (!(strict & (UNIFY_ALLOW_LESS_CV_QUAL | UNIFY_ALLOW_OUTER_LESS_CV_QUAL)) && (parm_quals & arg_quals) != arg_quals) return 0; return 1; } /* Determines the LEVEL and INDEX for the template parameter PARM. */ void template_parm_level_and_index (tree parm, int* level, int* index) { if (TREE_CODE (parm) == TEMPLATE_TYPE_PARM || TREE_CODE (parm) == TEMPLATE_TEMPLATE_PARM || TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM) { *index = TEMPLATE_TYPE_IDX (parm); *level = TEMPLATE_TYPE_LEVEL (parm); } else { *index = TEMPLATE_PARM_IDX (parm); *level = TEMPLATE_PARM_LEVEL (parm); } } #define RECUR_AND_CHECK_FAILURE(TP, TA, P, A, S, EP) \ do { \ if (unify (TP, TA, P, A, S, EP)) \ return 1; \ } while (0) /* Unifies the remaining arguments in PACKED_ARGS with the pack expansion at the end of PACKED_PARMS. Returns 0 if the type deduction succeeds, 1 otherwise. STRICT is the same as in fn_type_unification. CALL_ARGS_P is true iff PACKED_ARGS is actually a function call argument list. We'll need to adjust the arguments to make them types. SUBR tells us if this is from a recursive call to type_unification_real, or for comparing two template argument lists. */ static int unify_pack_expansion (tree tparms, tree targs, tree packed_parms, tree packed_args, unification_kind_t strict, bool subr, bool explain_p) { tree parm = TREE_VEC_ELT (packed_parms, TREE_VEC_LENGTH (packed_parms) - 1); tree pattern = PACK_EXPANSION_PATTERN (parm); tree pack, packs = NULL_TREE; int i, start = TREE_VEC_LENGTH (packed_parms) - 1; /* Add in any args remembered from an earlier partial instantiation. */ targs = add_to_template_args (PACK_EXPANSION_EXTRA_ARGS (parm), targs); int levels = TMPL_ARGS_DEPTH (targs); packed_args = expand_template_argument_pack (packed_args); int len = TREE_VEC_LENGTH (packed_args); /* Determine the parameter packs we will be deducing from the pattern, and record their current deductions. */ for (pack = PACK_EXPANSION_PARAMETER_PACKS (parm); pack; pack = TREE_CHAIN (pack)) { tree parm_pack = TREE_VALUE (pack); int idx, level; /* Only template parameter packs can be deduced, not e.g. 
function parameter packs or __bases or __integer_pack. */ if (!TEMPLATE_PARM_P (parm_pack)) continue; /* Determine the index and level of this parameter pack. */ template_parm_level_and_index (parm_pack, &level, &idx); if (level < levels) continue; /* Keep track of the parameter packs and their corresponding argument packs. */ packs = tree_cons (parm_pack, TMPL_ARG (targs, level, idx), packs); TREE_TYPE (packs) = make_tree_vec (len - start); } /* Loop through all of the arguments that have not yet been unified and unify each with the pattern. */ for (i = start; i < len; i++) { tree parm; bool any_explicit = false; tree arg = TREE_VEC_ELT (packed_args, i); /* For each parameter pack, set its TMPL_ARG to either NULL_TREE or the element of its argument pack at the current index if this argument was explicitly specified. */ for (pack = packs; pack; pack = TREE_CHAIN (pack)) { int idx, level; tree arg, pargs; template_parm_level_and_index (TREE_PURPOSE (pack), &level, &idx); arg = NULL_TREE; if (TREE_VALUE (pack) && (pargs = ARGUMENT_PACK_EXPLICIT_ARGS (TREE_VALUE (pack))) && (i - start < TREE_VEC_LENGTH (pargs))) { any_explicit = true; arg = TREE_VEC_ELT (pargs, i - start); } TMPL_ARG (targs, level, idx) = arg; } /* If we had explicit template arguments, substitute them into the pattern before deduction. */ if (any_explicit) { /* Some arguments might still be unspecified or dependent. */ bool dependent; ++processing_template_decl; dependent = any_dependent_template_arguments_p (targs); if (!dependent) --processing_template_decl; parm = tsubst (pattern, targs, explain_p ? tf_warning_or_error : tf_none, NULL_TREE); if (dependent) --processing_template_decl; if (parm == error_mark_node) return 1; } else parm = pattern; /* Unify the pattern with the current argument. */ if (unify_one_argument (tparms, targs, parm, arg, subr, strict, explain_p)) return 1; /* For each parameter pack, collect the deduced value. */ for (pack = packs; pack; pack = TREE_CHAIN (pack)) { int idx, level; template_parm_level_and_index (TREE_PURPOSE (pack), &level, &idx); TREE_VEC_ELT (TREE_TYPE (pack), i - start) = TMPL_ARG (targs, level, idx); } } /* Verify that the results of unification with the parameter packs produce results consistent with what we've seen before, and make the deduced argument packs available. */ for (pack = packs; pack; pack = TREE_CHAIN (pack)) { tree old_pack = TREE_VALUE (pack); tree new_args = TREE_TYPE (pack); int i, len = TREE_VEC_LENGTH (new_args); int idx, level; bool nondeduced_p = false; /* By default keep the original deduced argument pack. If necessary, more specific code is going to update the resulting deduced argument later down in this function. */ template_parm_level_and_index (TREE_PURPOSE (pack), &level, &idx); TMPL_ARG (targs, level, idx) = old_pack; /* If NEW_ARGS contains any NULL_TREE entries, we didn't actually deduce anything. */ for (i = 0; i < len && !nondeduced_p; ++i) if (TREE_VEC_ELT (new_args, i) == NULL_TREE) nondeduced_p = true; if (nondeduced_p) continue; if (old_pack && ARGUMENT_PACK_INCOMPLETE_P (old_pack)) { /* If we had fewer function args than explicit template args, just use the explicits. */ tree explicit_args = ARGUMENT_PACK_EXPLICIT_ARGS (old_pack); int explicit_len = TREE_VEC_LENGTH (explicit_args); if (len < explicit_len) new_args = explicit_args; } if (!old_pack) { tree result; /* Build the deduced *_ARGUMENT_PACK. 
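For instance (an illustration added for exposition, not part of the original source), for

         template <class... Ts> void f(Ts...);
         f(1, 2.5, 'c');

     the argument pack built here for Ts holds {int, double, char}, one deduction per remaining call argument.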
*/ if (TREE_CODE (TREE_PURPOSE (pack)) == TEMPLATE_PARM_INDEX) { result = make_node (NONTYPE_ARGUMENT_PACK); TREE_CONSTANT (result) = 1; } else result = cxx_make_type (TYPE_ARGUMENT_PACK); SET_ARGUMENT_PACK_ARGS (result, new_args); /* Note the deduced argument packs for this parameter pack. */ TMPL_ARG (targs, level, idx) = result; } else if (ARGUMENT_PACK_INCOMPLETE_P (old_pack) && (ARGUMENT_PACK_ARGS (old_pack) == ARGUMENT_PACK_EXPLICIT_ARGS (old_pack))) { /* We only had the explicitly-provided arguments before, but now we have a complete set of arguments. */ tree explicit_args = ARGUMENT_PACK_EXPLICIT_ARGS (old_pack); SET_ARGUMENT_PACK_ARGS (old_pack, new_args); ARGUMENT_PACK_INCOMPLETE_P (old_pack) = 1; ARGUMENT_PACK_EXPLICIT_ARGS (old_pack) = explicit_args; } else { tree bad_old_arg = NULL_TREE, bad_new_arg = NULL_TREE; tree old_args = ARGUMENT_PACK_ARGS (old_pack); if (!comp_template_args (old_args, new_args, &bad_old_arg, &bad_new_arg)) /* Inconsistent unification of this parameter pack. */ return unify_parameter_pack_inconsistent (explain_p, bad_old_arg, bad_new_arg); } } return unify_success (explain_p); } /* Handle unification of the domain of an array. PARM_DOM and ARG_DOM are INTEGER_TYPEs representing the TYPE_DOMAIN of ARRAY_TYPEs. The other parameters and return value are as for unify. */ static int unify_array_domain (tree tparms, tree targs, tree parm_dom, tree arg_dom, bool explain_p) { tree parm_max; tree arg_max; bool parm_cst; bool arg_cst; /* Our representation of array types uses "N - 1" as the TYPE_MAX_VALUE for an array with "N" elements, if "N" is not an integer constant. We cannot unify arbitrarily complex expressions, so we eliminate the MINUS_EXPRs here. */ parm_max = TYPE_MAX_VALUE (parm_dom); parm_cst = TREE_CODE (parm_max) == INTEGER_CST; if (!parm_cst) { gcc_assert (TREE_CODE (parm_max) == MINUS_EXPR); parm_max = TREE_OPERAND (parm_max, 0); } arg_max = TYPE_MAX_VALUE (arg_dom); arg_cst = TREE_CODE (arg_max) == INTEGER_CST; if (!arg_cst) { /* The ARG_MAX may not be a simple MINUS_EXPR, if we are trying to unify the type of a variable with the type of a template parameter. For example: template <unsigned int N> void f (char (&) [N]); int g(); void h(int i) { char a[g(i)]; f(a); } Here, the type of the ARG will be "int [g(i)]", and may be a SAVE_EXPR, etc. */ if (TREE_CODE (arg_max) != MINUS_EXPR) return unify_vla_arg (explain_p, arg_dom); arg_max = TREE_OPERAND (arg_max, 0); } /* If only one of the bounds used a MINUS_EXPR, compensate by adding one to the other bound. */ if (parm_cst && !arg_cst) parm_max = fold_build2_loc (input_location, PLUS_EXPR, integer_type_node, parm_max, integer_one_node); else if (arg_cst && !parm_cst) arg_max = fold_build2_loc (input_location, PLUS_EXPR, integer_type_node, arg_max, integer_one_node); return unify (tparms, targs, parm_max, arg_max, UNIFY_ALLOW_INTEGER, explain_p); } /* Returns whether T, a P or A in unify, is a type, template or expression. */ enum pa_kind_t { pa_type, pa_tmpl, pa_expr }; static pa_kind_t pa_kind (tree t) { if (PACK_EXPANSION_P (t)) t = PACK_EXPANSION_PATTERN (t); if (TREE_CODE (t) == TEMPLATE_TEMPLATE_PARM || TREE_CODE (t) == UNBOUND_CLASS_TEMPLATE || DECL_TYPE_TEMPLATE_P (t)) return pa_tmpl; else if (TYPE_P (t)) return pa_type; else return pa_expr; } /* Deduce the value of template parameters. TPARMS is the (innermost) set of template parameters to a template. 
TARGS is the bindings for those template parameters, as determined thus far; TARGS may include template arguments for outer levels of template parameters as well. PARM is a parameter to a template function, or a subcomponent of that parameter; ARG is the corresponding argument. This function attempts to match PARM with ARG in a manner consistent with the existing assignments in TARGS. If more values are deduced, then TARGS is updated. Returns 0 if the type deduction succeeds, 1 otherwise. The parameter STRICT is a bitwise or of the following flags: UNIFY_ALLOW_NONE: Require an exact match between PARM and ARG. UNIFY_ALLOW_MORE_CV_QUAL: Allow the deduced ARG to be more cv-qualified (by qualification conversion) than ARG. UNIFY_ALLOW_LESS_CV_QUAL: Allow the deduced ARG to be less cv-qualified than ARG. UNIFY_ALLOW_DERIVED: Allow the deduced ARG to be a template base class of ARG, or a pointer to a template base class of the type pointed to by ARG. UNIFY_ALLOW_INTEGER: Allow any integral type to be deduced. See the TEMPLATE_PARM_INDEX case for more information. UNIFY_ALLOW_OUTER_LEVEL: This is the outermost level of a deduction. Used to determine validity of qualification conversions. A valid qualification conversion must have const qualified pointers leading up to the inner type which requires additional CV quals, except at the outer level, where const is not required [conv.qual]. It would be normal to set this flag in addition to setting UNIFY_ALLOW_MORE_CV_QUAL. UNIFY_ALLOW_OUTER_MORE_CV_QUAL: This is the outermost level of a deduction, and PARM can be more CV qualified at this point. UNIFY_ALLOW_OUTER_LESS_CV_QUAL: This is the outermost level of a deduction, and PARM can be less CV qualified at this point. */ static int unify (tree tparms, tree targs, tree parm, tree arg, int strict, bool explain_p) { int idx; tree targ; tree tparm; int strict_in = strict; tsubst_flags_t complain = (explain_p ? tf_warning_or_error : tf_none); /* I don't think this will do the right thing with respect to types. But the only case I've seen it in so far has been array bounds, where signedness is the only information lost, and I think that will be okay. VIEW_CONVERT_EXPR can appear with class NTTP, thanks to finish_id_expression_1, and are also OK. */ while (CONVERT_EXPR_P (parm) || TREE_CODE (parm) == VIEW_CONVERT_EXPR) parm = TREE_OPERAND (parm, 0); if (arg == error_mark_node) return unify_invalid (explain_p); if (arg == unknown_type_node || arg == init_list_type_node) /* We can't deduce anything from this, but we might get all the template args from other function args. */ return unify_success (explain_p); if (parm == any_targ_node || arg == any_targ_node) return unify_success (explain_p); /* If PARM uses template parameters, then we can't bail out here, even if ARG == PARM, since we won't record unifications for the template parameters. We might need them if we're trying to figure out which of two things is more specialized. */ if (arg == parm && !uses_template_parms (parm)) return unify_success (explain_p); /* Handle init lists early, so the rest of the function can assume we're dealing with a type. */ if (BRACE_ENCLOSED_INITIALIZER_P (arg)) { tree elt, elttype; unsigned i; tree orig_parm = parm; if (!is_std_init_list (parm) && TREE_CODE (parm) != ARRAY_TYPE) /* We can only deduce from an initializer list argument if the parameter is std::initializer_list or an array; otherwise this is a non-deduced context. 
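For example (an illustration added for exposition, not part of the original source):

         template <class T> void f(std::initializer_list<T>);
         f({1, 2, 3});                 T is deduced as int from the elements

         template <class T> void g(T);
         g({1, 2, 3});                 error: a braced list is a non-deduced context for T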
*/ return unify_success (explain_p); if (TREE_CODE (parm) == ARRAY_TYPE) elttype = TREE_TYPE (parm); else { elttype = TREE_VEC_ELT (CLASSTYPE_TI_ARGS (parm), 0); /* Deduction is defined in terms of a single type, so just punt on the (bizarre) std::initializer_list<T...>. */ if (PACK_EXPANSION_P (elttype)) return unify_success (explain_p); } if (strict != DEDUCE_EXACT && TYPE_P (elttype) && !uses_deducible_template_parms (elttype)) /* If ELTTYPE has no deducible template parms, skip deduction from the list elements. */; else FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (arg), i, elt) { int elt_strict = strict; if (elt == error_mark_node) return unify_invalid (explain_p); if (!BRACE_ENCLOSED_INITIALIZER_P (elt)) { tree type = TREE_TYPE (elt); if (type == error_mark_node) return unify_invalid (explain_p); /* It should only be possible to get here for a call. */ gcc_assert (elt_strict & UNIFY_ALLOW_OUTER_LEVEL); elt_strict |= maybe_adjust_types_for_deduction (DEDUCE_CALL, &elttype, &type, elt); elt = type; } RECUR_AND_CHECK_FAILURE (tparms, targs, elttype, elt, elt_strict, explain_p); } if (TREE_CODE (parm) == ARRAY_TYPE && deducible_array_bound (TYPE_DOMAIN (parm))) { /* Also deduce from the length of the initializer list. */ tree max = size_int (CONSTRUCTOR_NELTS (arg)); tree idx = compute_array_index_type (NULL_TREE, max, tf_none); if (idx == error_mark_node) return unify_invalid (explain_p); return unify_array_domain (tparms, targs, TYPE_DOMAIN (parm), idx, explain_p); } /* If the std::initializer_list<T> deduction worked, replace the deduced A with std::initializer_list<A>. */ if (orig_parm != parm) { idx = TEMPLATE_TYPE_IDX (orig_parm); targ = TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx); targ = listify (targ); TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx) = targ; } return unify_success (explain_p); } /* If parm and arg aren't the same kind of thing (template, type, or expression), fail early. */ if (pa_kind (parm) != pa_kind (arg)) return unify_invalid (explain_p); /* Immediately reject some pairs that won't unify because of cv-qualification mismatches. */ if (TREE_CODE (arg) == TREE_CODE (parm) && TYPE_P (arg) /* It is the elements of the array which hold the cv quals of an array type, and the elements might be template type parms. We'll check when we recurse. */ && TREE_CODE (arg) != ARRAY_TYPE /* We check the cv-qualifiers when unifying with template type parameters below. We want to allow ARG `const T' to unify with PARM `T' for example, when computing which of two templates is more specialized, for example. */ && TREE_CODE (arg) != TEMPLATE_TYPE_PARM && !check_cv_quals_for_unify (strict_in, arg, parm)) return unify_cv_qual_mismatch (explain_p, parm, arg); if (!(strict & UNIFY_ALLOW_OUTER_LEVEL) && TYPE_P (parm) && !CP_TYPE_CONST_P (parm)) strict &= ~UNIFY_ALLOW_MORE_CV_QUAL; strict &= ~UNIFY_ALLOW_OUTER_LEVEL; strict &= ~UNIFY_ALLOW_DERIVED; strict &= ~UNIFY_ALLOW_OUTER_MORE_CV_QUAL; strict &= ~UNIFY_ALLOW_OUTER_LESS_CV_QUAL; switch (TREE_CODE (parm)) { case TYPENAME_TYPE: case SCOPE_REF: case UNBOUND_CLASS_TEMPLATE: /* In a type which contains a nested-name-specifier, template argument values cannot be deduced for template parameters used within the nested-name-specifier. 
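For example (an illustration added for exposition, not part of the original source):

         template <class T> struct A { typedef T type; };
         template <class T> void f(typename A<T>::type, T);
         f(1, 2);

     The first parameter is such a non-deduced context, so T is deduced only from the second argument, as int.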
*/ return unify_success (explain_p); case TEMPLATE_TYPE_PARM: case TEMPLATE_TEMPLATE_PARM: case BOUND_TEMPLATE_TEMPLATE_PARM: tparm = TREE_VALUE (TREE_VEC_ELT (tparms, 0)); if (error_operand_p (tparm)) return unify_invalid (explain_p); if (TEMPLATE_TYPE_LEVEL (parm) != template_decl_level (tparm)) /* The PARM is not one we're trying to unify. Just check to see if it matches ARG. */ { if (TREE_CODE (arg) == TREE_CODE (parm) && (is_auto (parm) ? is_auto (arg) : same_type_p (parm, arg))) return unify_success (explain_p); else return unify_type_mismatch (explain_p, parm, arg); } idx = TEMPLATE_TYPE_IDX (parm); targ = TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx); tparm = TREE_VALUE (TREE_VEC_ELT (tparms, idx)); if (error_operand_p (tparm)) return unify_invalid (explain_p); /* Check for mixed types and values. */ if ((TREE_CODE (parm) == TEMPLATE_TYPE_PARM && TREE_CODE (tparm) != TYPE_DECL) || (TREE_CODE (parm) == TEMPLATE_TEMPLATE_PARM && TREE_CODE (tparm) != TEMPLATE_DECL)) gcc_unreachable (); if (TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM) { if ((strict_in & UNIFY_ALLOW_DERIVED) && CLASS_TYPE_P (arg)) { /* First try to match ARG directly. */ tree t = try_class_unification (tparms, targs, parm, arg, explain_p); if (!t) { /* Otherwise, look for a suitable base of ARG, as below. */ enum template_base_result r; r = get_template_base (tparms, targs, parm, arg, explain_p, &t); if (!t) return unify_no_common_base (explain_p, r, parm, arg); arg = t; } } /* ARG must be constructed from a template class or a template template parameter. */ else if (TREE_CODE (arg) != BOUND_TEMPLATE_TEMPLATE_PARM && !CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P (arg)) return unify_template_deduction_failure (explain_p, parm, arg); /* Deduce arguments T, i from TT<T> or TT<i>. */ if (unify_bound_ttp_args (tparms, targs, parm, arg, explain_p)) return 1; arg = TYPE_TI_TEMPLATE (arg); /* Fall through to deduce template name. */ } if (TREE_CODE (parm) == TEMPLATE_TEMPLATE_PARM || TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM) { /* Deduce template name TT from TT, TT<>, TT<T> and TT<i>. */ /* Simple cases: Value already set, does match or doesn't. */ if (targ != NULL_TREE && template_args_equal (targ, arg)) return unify_success (explain_p); else if (targ) return unify_inconsistency (explain_p, parm, targ, arg); } else { /* If PARM is `const T' and ARG is only `int', we don't have a match unless we are allowing additional qualification. If ARG is `const int' and PARM is just `T' that's OK; that binds `const int' to `T'. */ if (!check_cv_quals_for_unify (strict_in | UNIFY_ALLOW_LESS_CV_QUAL, arg, parm)) return unify_cv_qual_mismatch (explain_p, parm, arg); /* Consider the case where ARG is `const volatile int' and PARM is `const T'. Then, T should be `volatile int'. */ arg = cp_build_qualified_type_real (arg, cp_type_quals (arg) & ~cp_type_quals (parm), tf_none); if (arg == error_mark_node) return unify_invalid (explain_p); /* Simple cases: Value already set, does match or doesn't. */ if (targ != NULL_TREE && same_type_p (targ, arg)) return unify_success (explain_p); else if (targ) return unify_inconsistency (explain_p, parm, targ, arg); /* Make sure that ARG is not a variable-sized array. (Note that were talking about variable-sized arrays (like `int[n]'), rather than arrays of unknown size (like `int[]').) We'll get very confused by such a type since the bound of the array is not constant, and therefore not mangleable. Besides, such types are not allowed in ISO C++, so we can do as we please here. 
We do allow them for 'auto' deduction, since that isn't ABI-exposed. */ if (!is_auto (parm) && variably_modified_type_p (arg, NULL_TREE)) return unify_vla_arg (explain_p, arg); /* Strip typedefs as in convert_template_argument. */ arg = canonicalize_type_argument (arg, tf_none); } /* If ARG is a parameter pack or an expansion, we cannot unify against it unless PARM is also a parameter pack. */ if ((template_parameter_pack_p (arg) || PACK_EXPANSION_P (arg)) && !template_parameter_pack_p (parm)) return unify_parameter_pack_mismatch (explain_p, parm, arg); /* If the argument deduction results is a METHOD_TYPE, then there is a problem. METHOD_TYPE doesn't map to any real C++ type the result of the deduction cannot be of that type. */ if (TREE_CODE (arg) == METHOD_TYPE) return unify_method_type_error (explain_p, arg); TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx) = arg; return unify_success (explain_p); case TEMPLATE_PARM_INDEX: tparm = TREE_VALUE (TREE_VEC_ELT (tparms, 0)); if (error_operand_p (tparm)) return unify_invalid (explain_p); if (TEMPLATE_PARM_LEVEL (parm) != template_decl_level (tparm)) { /* The PARM is not one we're trying to unify. Just check to see if it matches ARG. */ int result = !(TREE_CODE (arg) == TREE_CODE (parm) && cp_tree_equal (parm, arg)); if (result) unify_expression_unequal (explain_p, parm, arg); return result; } idx = TEMPLATE_PARM_IDX (parm); targ = TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx); if (targ) { if ((strict & UNIFY_ALLOW_INTEGER) && TREE_TYPE (targ) && TREE_TYPE (arg) && CP_INTEGRAL_TYPE_P (TREE_TYPE (targ))) /* We're deducing from an array bound, the type doesn't matter. */ arg = fold_convert (TREE_TYPE (targ), arg); int x = !cp_tree_equal (targ, arg); if (x) unify_inconsistency (explain_p, parm, targ, arg); return x; } /* [temp.deduct.type] If, in the declaration of a function template with a non-type template-parameter, the non-type template-parameter is used in an expression in the function parameter-list and, if the corresponding template-argument is deduced, the template-argument type shall match the type of the template-parameter exactly, except that a template-argument deduced from an array bound may be of any integral type. The non-type parameter might use already deduced type parameters. */ tparm = TREE_TYPE (parm); if (TEMPLATE_PARM_LEVEL (parm) > TMPL_ARGS_DEPTH (targs)) /* We don't have enough levels of args to do any substitution. This can happen in the context of -fnew-ttp-matching. */; else { ++processing_template_decl; tparm = tsubst (tparm, targs, tf_none, NULL_TREE); --processing_template_decl; if (tree a = type_uses_auto (tparm)) { tparm = do_auto_deduction (tparm, arg, a, complain, adc_unify); if (tparm == error_mark_node) return 1; } } if (!TREE_TYPE (arg)) /* Template-parameter dependent expression. Just accept it for now. It will later be processed in convert_template_argument. */ ; else if (same_type_ignoring_top_level_qualifiers_p (non_reference (TREE_TYPE (arg)), non_reference (tparm))) /* OK. Ignore top-level quals here because a class-type template parameter object is const. */; else if ((strict & UNIFY_ALLOW_INTEGER) && CP_INTEGRAL_TYPE_P (tparm)) /* Convert the ARG to the type of PARM; the deduced non-type template argument must exactly match the types of the corresponding parameter. */ arg = fold (build_nop (tparm, arg)); else if (uses_template_parms (tparm)) { /* We haven't deduced the type of this parameter yet. */ if (cxx_dialect >= cxx17 /* We deduce from array bounds in try_array_deduction. 
*/ && !(strict & UNIFY_ALLOW_INTEGER)) { /* Deduce it from the non-type argument. */ tree atype = TREE_TYPE (arg); RECUR_AND_CHECK_FAILURE (tparms, targs, tparm, atype, UNIFY_ALLOW_NONE, explain_p); } else /* Try again later. */ return unify_success (explain_p); } else return unify_type_mismatch (explain_p, tparm, TREE_TYPE (arg)); /* If ARG is a parameter pack or an expansion, we cannot unify against it unless PARM is also a parameter pack. */ if ((template_parameter_pack_p (arg) || PACK_EXPANSION_P (arg)) && !TEMPLATE_PARM_PARAMETER_PACK (parm)) return unify_parameter_pack_mismatch (explain_p, parm, arg); { bool removed_attr = false; arg = strip_typedefs_expr (arg, &removed_attr); } TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx) = arg; return unify_success (explain_p); case PTRMEM_CST: { /* A pointer-to-member constant can be unified only with another constant. */ if (TREE_CODE (arg) != PTRMEM_CST) return unify_ptrmem_cst_mismatch (explain_p, parm, arg); /* Just unify the class member. It would be useless (and possibly wrong, depending on the strict flags) to unify also PTRMEM_CST_CLASS, because we want to be sure that both parm and arg refer to the same variable, even if through different classes. For instance: struct A { int x; }; struct B : A { }; Unification of &A::x and &B::x must succeed. */ return unify (tparms, targs, PTRMEM_CST_MEMBER (parm), PTRMEM_CST_MEMBER (arg), strict, explain_p); } case POINTER_TYPE: { if (!TYPE_PTR_P (arg)) return unify_type_mismatch (explain_p, parm, arg); /* [temp.deduct.call] A can be another pointer or pointer to member type that can be converted to the deduced A via a qualification conversion (_conv.qual_). We pass down STRICT here rather than UNIFY_ALLOW_NONE. This will allow for additional cv-qualification of the pointed-to types if appropriate. */ if (TREE_CODE (TREE_TYPE (arg)) == RECORD_TYPE) /* The derived-to-base conversion only persists through one level of pointers. */ strict |= (strict_in & UNIFY_ALLOW_DERIVED); return unify (tparms, targs, TREE_TYPE (parm), TREE_TYPE (arg), strict, explain_p); } case REFERENCE_TYPE: if (!TYPE_REF_P (arg)) return unify_type_mismatch (explain_p, parm, arg); return unify (tparms, targs, TREE_TYPE (parm), TREE_TYPE (arg), strict & UNIFY_ALLOW_MORE_CV_QUAL, explain_p); case ARRAY_TYPE: if (TREE_CODE (arg) != ARRAY_TYPE) return unify_type_mismatch (explain_p, parm, arg); if ((TYPE_DOMAIN (parm) == NULL_TREE) != (TYPE_DOMAIN (arg) == NULL_TREE)) return unify_type_mismatch (explain_p, parm, arg); RECUR_AND_CHECK_FAILURE (tparms, targs, TREE_TYPE (parm), TREE_TYPE (arg), strict & UNIFY_ALLOW_MORE_CV_QUAL, explain_p); if (TYPE_DOMAIN (parm) != NULL_TREE) return unify_array_domain (tparms, targs, TYPE_DOMAIN (parm), TYPE_DOMAIN (arg), explain_p); return unify_success (explain_p); case REAL_TYPE: case COMPLEX_TYPE: case VECTOR_TYPE: case INTEGER_TYPE: case BOOLEAN_TYPE: case ENUMERAL_TYPE: case VOID_TYPE: case NULLPTR_TYPE: if (TREE_CODE (arg) != TREE_CODE (parm)) return unify_type_mismatch (explain_p, parm, arg); /* We have already checked cv-qualification at the top of the function. */ if (!same_type_ignoring_top_level_qualifiers_p (arg, parm)) return unify_type_mismatch (explain_p, parm, arg); /* As far as unification is concerned, this wins. Later checks will invalidate it if necessary. */ return unify_success (explain_p); /* Types INTEGER_CST and MINUS_EXPR can come from array bounds. */ /* Type INTEGER_CST can come from ordinary constant template args. 
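For example (an illustrative case; the name S is made up): matching the partial specialization S<T, 5> against the use S<int, 5> eventually compares the two INTEGER_CSTs 5 and 5 in the case below, which succeeds; a mismatched pair such as 5 and 7 fails with unify_template_argument_mismatch. 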
*/ case INTEGER_CST: while (CONVERT_EXPR_P (arg)) arg = TREE_OPERAND (arg, 0); if (TREE_CODE (arg) != INTEGER_CST) return unify_template_argument_mismatch (explain_p, parm, arg); return (tree_int_cst_equal (parm, arg) ? unify_success (explain_p) : unify_template_argument_mismatch (explain_p, parm, arg)); case TREE_VEC: { int i, len, argslen; int parm_variadic_p = 0; if (TREE_CODE (arg) != TREE_VEC) return unify_template_argument_mismatch (explain_p, parm, arg); len = TREE_VEC_LENGTH (parm); argslen = TREE_VEC_LENGTH (arg); /* Check for pack expansions in the parameters. */ for (i = 0; i < len; ++i) { if (PACK_EXPANSION_P (TREE_VEC_ELT (parm, i))) { if (i == len - 1) /* We can unify against something with a trailing parameter pack. */ parm_variadic_p = 1; else /* [temp.deduct.type]/9: If the template argument list of P contains a pack expansion that is not the last template argument, the entire template argument list is a non-deduced context. */ return unify_success (explain_p); } } /* If we don't have enough arguments to satisfy the parameters (not counting the pack expression at the end), or we have too many arguments for a parameter list that doesn't end in a pack expression, we can't unify. */ if (parm_variadic_p ? argslen < len - parm_variadic_p : argslen != len) return unify_arity (explain_p, TREE_VEC_LENGTH (arg), len); /* Unify all of the parameters that precede the (optional) pack expression. */ for (i = 0; i < len - parm_variadic_p; ++i) { RECUR_AND_CHECK_FAILURE (tparms, targs, TREE_VEC_ELT (parm, i), TREE_VEC_ELT (arg, i), UNIFY_ALLOW_NONE, explain_p); } if (parm_variadic_p) return unify_pack_expansion (tparms, targs, parm, arg, DEDUCE_EXACT, /*subr=*/true, explain_p); return unify_success (explain_p); } case RECORD_TYPE: case UNION_TYPE: if (TREE_CODE (arg) != TREE_CODE (parm)) return unify_type_mismatch (explain_p, parm, arg); if (TYPE_PTRMEMFUNC_P (parm)) { if (!TYPE_PTRMEMFUNC_P (arg)) return unify_type_mismatch (explain_p, parm, arg); return unify (tparms, targs, TYPE_PTRMEMFUNC_FN_TYPE (parm), TYPE_PTRMEMFUNC_FN_TYPE (arg), strict, explain_p); } else if (TYPE_PTRMEMFUNC_P (arg)) return unify_type_mismatch (explain_p, parm, arg); if (CLASSTYPE_TEMPLATE_INFO (parm)) { tree t = NULL_TREE; if (strict_in & UNIFY_ALLOW_DERIVED) { /* First, we try to unify the PARM and ARG directly. */ t = try_class_unification (tparms, targs, parm, arg, explain_p); if (!t) { /* Fallback to the special case allowed in [temp.deduct.call]: If P is a class, and P has the form template-id, then A can be a derived class of the deduced A. Likewise, if P is a pointer to a class of the form template-id, A can be a pointer to a derived class pointed to by the deduced A. */ enum template_base_result r; r = get_template_base (tparms, targs, parm, arg, explain_p, &t); if (!t) { /* Don't give the derived diagnostic if we're already dealing with the same template. */ bool same_template = (CLASSTYPE_TEMPLATE_INFO (arg) && (CLASSTYPE_TI_TEMPLATE (parm) == CLASSTYPE_TI_TEMPLATE (arg))); return unify_no_common_base (explain_p && !same_template, r, parm, arg); } } } else if (CLASSTYPE_TEMPLATE_INFO (arg) && (CLASSTYPE_TI_TEMPLATE (parm) == CLASSTYPE_TI_TEMPLATE (arg))) /* Perhaps PARM is something like S<U> and ARG is S<int>. Then, we should unify `int' and `U'. */ t = arg; else /* There's no chance of unification succeeding. 
*/ return unify_type_mismatch (explain_p, parm, arg); return unify (tparms, targs, CLASSTYPE_TI_ARGS (parm), CLASSTYPE_TI_ARGS (t), UNIFY_ALLOW_NONE, explain_p); } else if (!same_type_ignoring_top_level_qualifiers_p (parm, arg)) return unify_type_mismatch (explain_p, parm, arg); return unify_success (explain_p); case METHOD_TYPE: case FUNCTION_TYPE: { unsigned int nargs; tree *args; tree a; unsigned int i; if (TREE_CODE (arg) != TREE_CODE (parm)) return unify_type_mismatch (explain_p, parm, arg); /* CV qualifications for methods can never be deduced, they must match exactly. We need to check them explicitly here, because type_unification_real treats them as any other cv-qualified parameter. */ if (TREE_CODE (parm) == METHOD_TYPE && (!check_cv_quals_for_unify (UNIFY_ALLOW_NONE, class_of_this_parm (arg), class_of_this_parm (parm)))) return unify_cv_qual_mismatch (explain_p, parm, arg); if (TREE_CODE (arg) == FUNCTION_TYPE && type_memfn_quals (parm) != type_memfn_quals (arg)) return unify_cv_qual_mismatch (explain_p, parm, arg); if (type_memfn_rqual (parm) != type_memfn_rqual (arg)) return unify_type_mismatch (explain_p, parm, arg); RECUR_AND_CHECK_FAILURE (tparms, targs, TREE_TYPE (parm), TREE_TYPE (arg), UNIFY_ALLOW_NONE, explain_p); nargs = list_length (TYPE_ARG_TYPES (arg)); args = XALLOCAVEC (tree, nargs); for (a = TYPE_ARG_TYPES (arg), i = 0; a != NULL_TREE && a != void_list_node; a = TREE_CHAIN (a), ++i) args[i] = TREE_VALUE (a); nargs = i; if (type_unification_real (tparms, targs, TYPE_ARG_TYPES (parm), args, nargs, 1, DEDUCE_EXACT, NULL, explain_p)) return 1; if (flag_noexcept_type) { tree pspec = TYPE_RAISES_EXCEPTIONS (parm); tree aspec = canonical_eh_spec (TYPE_RAISES_EXCEPTIONS (arg)); if (pspec == NULL_TREE) pspec = noexcept_false_spec; if (aspec == NULL_TREE) aspec = noexcept_false_spec; if (TREE_PURPOSE (pspec) && TREE_PURPOSE (aspec) && uses_template_parms (TREE_PURPOSE (pspec))) RECUR_AND_CHECK_FAILURE (tparms, targs, TREE_PURPOSE (pspec), TREE_PURPOSE (aspec), UNIFY_ALLOW_NONE, explain_p); else if (nothrow_spec_p (pspec) && !nothrow_spec_p (aspec)) return unify_type_mismatch (explain_p, parm, arg); } return 0; } case OFFSET_TYPE: /* Unify a pointer to member with a pointer to member function, which deduces the type of the member as a function type. */ if (TYPE_PTRMEMFUNC_P (arg)) { /* Check top-level cv qualifiers */ if (!check_cv_quals_for_unify (UNIFY_ALLOW_NONE, arg, parm)) return unify_cv_qual_mismatch (explain_p, parm, arg); RECUR_AND_CHECK_FAILURE (tparms, targs, TYPE_OFFSET_BASETYPE (parm), TYPE_PTRMEMFUNC_OBJECT_TYPE (arg), UNIFY_ALLOW_NONE, explain_p); /* Determine the type of the function we are unifying against. */ tree fntype = static_fn_type (arg); return unify (tparms, targs, TREE_TYPE (parm), fntype, strict, explain_p); } if (TREE_CODE (arg) != OFFSET_TYPE) return unify_type_mismatch (explain_p, parm, arg); RECUR_AND_CHECK_FAILURE (tparms, targs, TYPE_OFFSET_BASETYPE (parm), TYPE_OFFSET_BASETYPE (arg), UNIFY_ALLOW_NONE, explain_p); return unify (tparms, targs, TREE_TYPE (parm), TREE_TYPE (arg), strict, explain_p); case CONST_DECL: if (DECL_TEMPLATE_PARM_P (parm)) return unify (tparms, targs, DECL_INITIAL (parm), arg, strict, explain_p); if (arg != scalar_constant_value (parm)) return unify_template_argument_mismatch (explain_p, parm, arg); return unify_success (explain_p); case FIELD_DECL: case TEMPLATE_DECL: /* Matched cases are handled by the ARG == PARM test above. 
*/ return unify_template_argument_mismatch (explain_p, parm, arg); case VAR_DECL: /* We might get a variable as a non-type template argument in parm if the corresponding parameter is type-dependent. Make any necessary adjustments based on whether arg is a reference. */ if (CONSTANT_CLASS_P (arg)) parm = fold_non_dependent_expr (parm, complain); else if (REFERENCE_REF_P (arg)) { tree sub = TREE_OPERAND (arg, 0); STRIP_NOPS (sub); if (TREE_CODE (sub) == ADDR_EXPR) arg = TREE_OPERAND (sub, 0); } /* Now use the normal expression code to check whether they match. */ goto expr; case TYPE_ARGUMENT_PACK: case NONTYPE_ARGUMENT_PACK: return unify (tparms, targs, ARGUMENT_PACK_ARGS (parm), ARGUMENT_PACK_ARGS (arg), strict, explain_p); case TYPEOF_TYPE: case DECLTYPE_TYPE: case UNDERLYING_TYPE: /* Cannot deduce anything from TYPEOF_TYPE, DECLTYPE_TYPE, or UNDERLYING_TYPE nodes. */ return unify_success (explain_p); case ERROR_MARK: /* Unification fails if we hit an error node. */ return unify_invalid (explain_p); case INDIRECT_REF: if (REFERENCE_REF_P (parm)) { bool pexp = PACK_EXPANSION_P (arg); if (pexp) arg = PACK_EXPANSION_PATTERN (arg); if (REFERENCE_REF_P (arg)) arg = TREE_OPERAND (arg, 0); if (pexp) arg = make_pack_expansion (arg, complain); return unify (tparms, targs, TREE_OPERAND (parm, 0), arg, strict, explain_p); } /* FALLTHRU */ default: /* An unresolved overload is a nondeduced context. */ if (is_overloaded_fn (parm) || type_unknown_p (parm)) return unify_success (explain_p); gcc_assert (EXPR_P (parm) || COMPOUND_LITERAL_P (parm) || TREE_CODE (parm) == TRAIT_EXPR); expr: /* We must be looking at an expression. This can happen with something like: template <int I> void foo(S<I>, S<I + 2>); or template<typename T> void foo(A<T, T{}>); This is a "non-deduced context": [deduct.type] The non-deduced contexts are: --A non-type template argument or an array bound in which a subexpression references a template parameter. In these cases, we assume deduction succeeded, but don't actually infer any unifications. */ if (!uses_template_parms (parm) && !template_args_equal (parm, arg)) return unify_expression_unequal (explain_p, parm, arg); else return unify_success (explain_p); } } #undef RECUR_AND_CHECK_FAILURE /* Note that DECL can be defined in this translation unit, if required. */ static void mark_definable (tree decl) { tree clone; DECL_NOT_REALLY_EXTERN (decl) = 1; FOR_EACH_CLONE (clone, decl) DECL_NOT_REALLY_EXTERN (clone) = 1; } /* Called if RESULT is explicitly instantiated, or is a member of an explicitly instantiated class. */ void mark_decl_instantiated (tree result, int extern_p) { SET_DECL_EXPLICIT_INSTANTIATION (result); /* If this entity has already been written out, it's too late to make any modifications. */ if (TREE_ASM_WRITTEN (result)) return; /* consteval functions are never emitted. */ if (TREE_CODE (result) == FUNCTION_DECL && DECL_IMMEDIATE_FUNCTION_P (result)) return; /* For anonymous namespace we don't need to do anything. */ if (decl_anon_ns_mem_p (result)) { gcc_assert (!TREE_PUBLIC (result)); return; } if (TREE_CODE (result) != FUNCTION_DECL) /* The TREE_PUBLIC flag for function declarations will have been set correctly by tsubst. */ TREE_PUBLIC (result) = 1; /* This might have been set by an earlier implicit instantiation. */ DECL_COMDAT (result) = 0; if (extern_p) DECL_NOT_REALLY_EXTERN (result) = 0; else { mark_definable (result); mark_needed (result); /* Always make artificials weak. 
*/ if (DECL_ARTIFICIAL (result) && flag_weak) comdat_linkage (result); /* For WIN32 we also want to put explicit instantiations in linkonce sections. */ else if (TREE_PUBLIC (result)) maybe_make_one_only (result); if (TREE_CODE (result) == FUNCTION_DECL && DECL_TEMPLATE_INSTANTIATED (result)) /* If the function has already been instantiated, clear DECL_EXTERNAL, since start_preparsed_function wouldn't have if we had an earlier extern explicit instantiation. */ DECL_EXTERNAL (result) = 0; } /* If EXTERN_P, then this function will not be emitted -- unless followed by an explicit instantiation, at which point its linkage will be adjusted. If !EXTERN_P, then this function will be emitted here. In neither circumstance do we want import_export_decl to adjust the linkage. */ DECL_INTERFACE_KNOWN (result) = 1; } /* Subroutine of more_specialized_fn: check whether TARGS is missing any important template arguments. If any are missing, we check whether they're important by using error_mark_node for substituting into any args that were used for partial ordering (the ones between ARGS and END) and seeing if it bubbles up. */ static bool check_undeduced_parms (tree targs, tree args, tree end) { bool found = false; int i; for (i = TREE_VEC_LENGTH (targs) - 1; i >= 0; --i) if (TREE_VEC_ELT (targs, i) == NULL_TREE) { found = true; TREE_VEC_ELT (targs, i) = error_mark_node; } if (found) { tree substed = tsubst_arg_types (args, targs, end, tf_none, NULL_TREE); if (substed == error_mark_node) return true; } return false; } /* Given two function templates PAT1 and PAT2, return: 1 if PAT1 is more specialized than PAT2 as described in [temp.func.order]. -1 if PAT2 is more specialized than PAT1. 0 if neither is more specialized. LEN indicates the number of parameters we should consider (defaulted parameters should not be considered). The 1998 std underspecified function template partial ordering, and DR214 addresses the issue. We take pairs of arguments, one from each of the templates, and deduce them against each other. One of the templates will be more specialized if all the *other* template's arguments deduce against its arguments and at least one of its arguments *does* *not* deduce against the other template's corresponding argument. Deduction is done as for class templates. The arguments used in deduction have reference and top level cv qualifiers removed. Iff both arguments were originally reference types *and* deduction succeeds in both directions, an lvalue reference wins against an rvalue reference and otherwise the template with the more cv-qualified argument wins for that pairing (if neither is more cv-qualified, they both are equal). Unlike regular deduction, after all the arguments have been deduced in this way, we do *not* verify the deduced template argument values can be substituted into non-deduced contexts. The logic can be a bit confusing here, because we look at deduce1 and targs1 to see if pat2 is at least as specialized, and vice versa; if we can find template arguments for pat1 to make arg1 look like arg2, that means that arg2 is at least as specialized as arg1. 
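For example (an illustrative pair; the name f is made up): with PAT1 = template <class T> void f (T); and PAT2 = template <class T> void f (T*);, the parameter T of PAT1 can be deduced from T*, but T* cannot be deduced from a bare T, so PAT2 is more specialized and the result is -1; with the roles swapped the result is 1. 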
*/ int more_specialized_fn (tree pat1, tree pat2, int len) { tree decl1 = DECL_TEMPLATE_RESULT (pat1); tree decl2 = DECL_TEMPLATE_RESULT (pat2); tree targs1 = make_tree_vec (DECL_NTPARMS (pat1)); tree targs2 = make_tree_vec (DECL_NTPARMS (pat2)); tree tparms1 = DECL_INNERMOST_TEMPLATE_PARMS (pat1); tree tparms2 = DECL_INNERMOST_TEMPLATE_PARMS (pat2); tree args1 = TYPE_ARG_TYPES (TREE_TYPE (decl1)); tree args2 = TYPE_ARG_TYPES (TREE_TYPE (decl2)); tree origs1, origs2; bool lose1 = false; bool lose2 = false; /* Remove the this parameter from non-static member functions. If one is a non-static member function and the other is not a static member function, remove the first parameter from that function also. This situation occurs for operator functions where we locate both a member function (with this pointer) and non-member operator (with explicit first operand). */ if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl1)) { len--; /* LEN is the number of significant arguments for DECL1 */ args1 = TREE_CHAIN (args1); if (!DECL_STATIC_FUNCTION_P (decl2)) args2 = TREE_CHAIN (args2); } else if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl2)) { args2 = TREE_CHAIN (args2); if (!DECL_STATIC_FUNCTION_P (decl1)) { len--; args1 = TREE_CHAIN (args1); } } /* If only one is a conversion operator, they are unordered. */ if (DECL_CONV_FN_P (decl1) != DECL_CONV_FN_P (decl2)) return 0; /* Consider the return type for a conversion function */ if (DECL_CONV_FN_P (decl1)) { args1 = tree_cons (NULL_TREE, TREE_TYPE (TREE_TYPE (decl1)), args1); args2 = tree_cons (NULL_TREE, TREE_TYPE (TREE_TYPE (decl2)), args2); len++; } processing_template_decl++; origs1 = args1; origs2 = args2; while (len-- /* Stop when an ellipsis is seen. */ && args1 != NULL_TREE && args2 != NULL_TREE) { tree arg1 = TREE_VALUE (args1); tree arg2 = TREE_VALUE (args2); int deduce1, deduce2; int quals1 = -1; int quals2 = -1; int ref1 = 0; int ref2 = 0; if (TREE_CODE (arg1) == TYPE_PACK_EXPANSION && TREE_CODE (arg2) == TYPE_PACK_EXPANSION) { /* When both arguments are pack expansions, we need only unify the patterns themselves. */ arg1 = PACK_EXPANSION_PATTERN (arg1); arg2 = PACK_EXPANSION_PATTERN (arg2); /* This is the last comparison we need to do. */ len = 0; } /* DR 1847: If a particular P contains no template-parameters that participate in template argument deduction, that P is not used to determine the ordering. */ if (!uses_deducible_template_parms (arg1) && !uses_deducible_template_parms (arg2)) goto next; if (TYPE_REF_P (arg1)) { ref1 = TYPE_REF_IS_RVALUE (arg1) + 1; arg1 = TREE_TYPE (arg1); quals1 = cp_type_quals (arg1); } if (TYPE_REF_P (arg2)) { ref2 = TYPE_REF_IS_RVALUE (arg2) + 1; arg2 = TREE_TYPE (arg2); quals2 = cp_type_quals (arg2); } arg1 = TYPE_MAIN_VARIANT (arg1); arg2 = TYPE_MAIN_VARIANT (arg2); if (TREE_CODE (arg1) == TYPE_PACK_EXPANSION) { int i, len2 = remaining_arguments (args2); tree parmvec = make_tree_vec (1); tree argvec = make_tree_vec (len2); tree ta = args2; /* Setup the parameter vector, which contains only ARG1. */ TREE_VEC_ELT (parmvec, 0) = arg1; /* Setup the argument vector, which contains the remaining arguments. */ for (i = 0; i < len2; i++, ta = TREE_CHAIN (ta)) TREE_VEC_ELT (argvec, i) = TREE_VALUE (ta); deduce1 = (unify_pack_expansion (tparms1, targs1, parmvec, argvec, DEDUCE_EXACT, /*subr=*/true, /*explain_p=*/false) == 0); /* We cannot deduce in the other direction, because ARG1 is a pack expansion but ARG2 is not. 
*/ deduce2 = 0; } else if (TREE_CODE (arg2) == TYPE_PACK_EXPANSION) { int i, len1 = remaining_arguments (args1); tree parmvec = make_tree_vec (1); tree argvec = make_tree_vec (len1); tree ta = args1; /* Setup the parameter vector, which contains only ARG2. */ TREE_VEC_ELT (parmvec, 0) = arg2; /* Setup the argument vector, which contains the remaining arguments. */ for (i = 0; i < len1; i++, ta = TREE_CHAIN (ta)) TREE_VEC_ELT (argvec, i) = TREE_VALUE (ta); deduce2 = (unify_pack_expansion (tparms2, targs2, parmvec, argvec, DEDUCE_EXACT, /*subr=*/true, /*explain_p=*/false) == 0); /* We cannot deduce in the other direction, because ARG2 is a pack expansion but ARG1 is not. */ deduce1 = 0; } else { /* The normal case, where neither argument is a pack expansion. */ deduce1 = (unify (tparms1, targs1, arg1, arg2, UNIFY_ALLOW_NONE, /*explain_p=*/false) == 0); deduce2 = (unify (tparms2, targs2, arg2, arg1, UNIFY_ALLOW_NONE, /*explain_p=*/false) == 0); } /* If we couldn't deduce arguments for tparms1 to make arg1 match arg2, then arg2 is not as specialized as arg1. */ if (!deduce1) lose2 = true; if (!deduce2) lose1 = true; /* "If, for a given type, deduction succeeds in both directions (i.e., the types are identical after the transformations above) and both P and A were reference types (before being replaced with the type referred to above): - if the type from the argument template was an lvalue reference and the type from the parameter template was not, the argument type is considered to be more specialized than the other; otherwise, - if the type from the argument template is more cv-qualified than the type from the parameter template (as described above), the argument type is considered to be more specialized than the other; otherwise, - neither type is more specialized than the other." */ if (deduce1 && deduce2) { if (ref1 && ref2 && ref1 != ref2) { if (ref1 > ref2) lose1 = true; else lose2 = true; } else if (quals1 != quals2 && quals1 >= 0 && quals2 >= 0) { if ((quals1 & quals2) == quals2) lose2 = true; if ((quals1 & quals2) == quals1) lose1 = true; } } if (lose1 && lose2) /* We've failed to deduce something in either direction. These must be unordered. */ break; next: if (TREE_CODE (arg1) == TYPE_PACK_EXPANSION || TREE_CODE (arg2) == TYPE_PACK_EXPANSION) /* We have already processed all of the arguments in our handling of the pack expansion type. */ len = 0; args1 = TREE_CHAIN (args1); args2 = TREE_CHAIN (args2); } /* "In most cases, all template parameters must have values in order for deduction to succeed, but for partial ordering purposes a template parameter may remain without a value provided it is not used in the types being used for partial ordering." Thus, if we are missing any of the targs1 we need to substitute into origs1, then pat2 is not as specialized as pat1. This can happen when there is a nondeduced context. */ if (!lose2 && check_undeduced_parms (targs1, origs1, args1)) lose2 = true; if (!lose1 && check_undeduced_parms (targs2, origs2, args2)) lose1 = true; processing_template_decl--; /* If both deductions succeed, the partial ordering selects the more constrained template. */ /* P2113: If the corresponding template-parameters of the template-parameter-lists are not equivalent ([temp.over.link]) or if the function parameters that positionally correspond between the two templates are not of the same type, neither template is more specialized than the other. 
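For example (an illustrative sketch; C names a hypothetical concept): given template <class T> requires C<T> void f (T); and template <class T> void f (T);, deduction succeeds in both directions, the template-parameter-lists are equivalent and the parameters correspond, so the more constrained first template is preferred; if the parameter types differed, constraints would not be consulted at this point. 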
*/ if (!lose1 && !lose2 && comp_template_parms (DECL_TEMPLATE_PARMS (pat1), DECL_TEMPLATE_PARMS (pat2)) && compparms (origs1, origs2)) { int winner = more_constrained (decl1, decl2); if (winner > 0) lose2 = true; else if (winner < 0) lose1 = true; } /* All things being equal, if the next argument is a pack expansion for one function but not for the other, prefer the non-variadic function. FIXME this is bogus; see c++/41958. */ if (lose1 == lose2 && args1 && TREE_VALUE (args1) && args2 && TREE_VALUE (args2)) { lose1 = TREE_CODE (TREE_VALUE (args1)) == TYPE_PACK_EXPANSION; lose2 = TREE_CODE (TREE_VALUE (args2)) == TYPE_PACK_EXPANSION; } if (lose1 == lose2) return 0; else if (!lose1) return 1; else return -1; } /* Determine which of two partial specializations of TMPL is more specialized. PAT1 is a TREE_LIST whose TREE_VALUE is the TEMPLATE_DECL corresponding to the first partial specialization. The TREE_PURPOSE is the innermost set of template parameters for the partial specialization. PAT2 is similar, but for the second template. Return 1 if the first partial specialization is more specialized; -1 if the second is more specialized; 0 if neither is more specialized. See [temp.class.order] for information about determining which of two templates is more specialized. */ static int more_specialized_partial_spec (tree tmpl, tree pat1, tree pat2) { tree targs; int winner = 0; bool any_deductions = false; tree tmpl1 = TREE_VALUE (pat1); tree tmpl2 = TREE_VALUE (pat2); tree specargs1 = TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (tmpl1))); tree specargs2 = TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (tmpl2))); /* Just like what happens for functions, if we are ordering between different template specializations, we may encounter dependent types in the arguments, and we need our dependency check functions to behave correctly. */ ++processing_template_decl; targs = get_partial_spec_bindings (tmpl, tmpl1, specargs2); if (targs) { --winner; any_deductions = true; } targs = get_partial_spec_bindings (tmpl, tmpl2, specargs1); if (targs) { ++winner; any_deductions = true; } --processing_template_decl; /* If both deductions succeed, the partial ordering selects the more constrained template. */ if (!winner && any_deductions) winner = more_constrained (tmpl1, tmpl2); /* In the case of a tie where at least one of the templates has a parameter pack at the end, the template with the most non-packed parameters wins. */ if (winner == 0 && any_deductions && (template_args_variadic_p (TREE_PURPOSE (pat1)) || template_args_variadic_p (TREE_PURPOSE (pat2)))) { tree args1 = INNERMOST_TEMPLATE_ARGS (TREE_PURPOSE (pat1)); tree args2 = INNERMOST_TEMPLATE_ARGS (TREE_PURPOSE (pat2)); int len1 = TREE_VEC_LENGTH (args1); int len2 = TREE_VEC_LENGTH (args2); /* We don't count the pack expansion at the end. */ if (template_args_variadic_p (TREE_PURPOSE (pat1))) --len1; if (template_args_variadic_p (TREE_PURPOSE (pat2))) --len2; if (len1 > len2) return 1; else if (len1 < len2) return -1; } return winner; } /* Return the template arguments that will produce the function signature DECL from the function template FN, with the explicit template arguments EXPLICIT_ARGS. If CHECK_RETTYPE is true, the return type must also match. Return NULL_TREE if no satisfactory arguments could be found. 
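For example (an illustrative case; the name f is made up): with FN = template <class T> void f (T); and DECL = void f (int);, and no explicit arguments, the returned vector binds T to int. 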
*/ static tree get_bindings (tree fn, tree decl, tree explicit_args, bool check_rettype) { int ntparms = DECL_NTPARMS (fn); tree targs = make_tree_vec (ntparms); tree decl_type = TREE_TYPE (decl); tree decl_arg_types; tree *args; unsigned int nargs, ix; tree arg; gcc_assert (decl != DECL_TEMPLATE_RESULT (fn)); /* Never do unification on the 'this' parameter. */ decl_arg_types = skip_artificial_parms_for (decl, TYPE_ARG_TYPES (decl_type)); nargs = list_length (decl_arg_types); args = XALLOCAVEC (tree, nargs); for (arg = decl_arg_types, ix = 0; arg != NULL_TREE && arg != void_list_node; arg = TREE_CHAIN (arg), ++ix) args[ix] = TREE_VALUE (arg); if (fn_type_unification (fn, explicit_args, targs, args, ix, (check_rettype || DECL_CONV_FN_P (fn) ? TREE_TYPE (decl_type) : NULL_TREE), DEDUCE_EXACT, LOOKUP_NORMAL, NULL, /*explain_p=*/false, /*decltype*/false) == error_mark_node) return NULL_TREE; return targs; } /* Return the innermost template arguments that, when applied to a partial specialization SPEC_TMPL of TMPL, yield the ARGS. For example, suppose we have: template <class T, class U> struct S {}; template <class T> struct S<T*, int> {}; Then, suppose we want to get `S<double*, int>'. SPEC_TMPL will be the partial specialization and the ARGS will be {double*, int}. The resulting vector will be {double}, indicating that `T' is bound to `double'. */ static tree get_partial_spec_bindings (tree tmpl, tree spec_tmpl, tree args) { tree tparms = DECL_INNERMOST_TEMPLATE_PARMS (spec_tmpl); tree spec_args = TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (spec_tmpl))); int i, ntparms = TREE_VEC_LENGTH (tparms); tree deduced_args; tree innermost_deduced_args; innermost_deduced_args = make_tree_vec (ntparms); if (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (args)) { deduced_args = copy_node (args); SET_TMPL_ARGS_LEVEL (deduced_args, TMPL_ARGS_DEPTH (deduced_args), innermost_deduced_args); } else deduced_args = innermost_deduced_args; bool tried_array_deduction = (cxx_dialect < cxx17); again: if (unify (tparms, deduced_args, INNERMOST_TEMPLATE_ARGS (spec_args), INNERMOST_TEMPLATE_ARGS (args), UNIFY_ALLOW_NONE, /*explain_p=*/false)) return NULL_TREE; for (i = 0; i < ntparms; ++i) if (! TREE_VEC_ELT (innermost_deduced_args, i)) { if (!tried_array_deduction) { try_array_deduction (tparms, innermost_deduced_args, INNERMOST_TEMPLATE_ARGS (spec_args)); tried_array_deduction = true; if (TREE_VEC_ELT (innermost_deduced_args, i)) goto again; } return NULL_TREE; } if (!push_tinst_level (spec_tmpl, deduced_args)) { excessive_deduction_depth = true; return NULL_TREE; } /* Verify that nondeduced template arguments agree with the type obtained from argument deduction. For example: struct A { typedef int X; }; template <class T, class U> struct C {}; template <class T> struct C<T, typename T::X> {}; Then with the instantiation `C<A, int>', we can deduce that `T' is `A' but unify () does not check whether `typename T::X' is `int'. */ spec_args = tsubst (spec_args, deduced_args, tf_none, NULL_TREE); if (spec_args != error_mark_node) spec_args = coerce_template_parms (DECL_INNERMOST_TEMPLATE_PARMS (tmpl), INNERMOST_TEMPLATE_ARGS (spec_args), tmpl, tf_none, false, false); pop_tinst_level (); if (spec_args == error_mark_node /* We only need to check the innermost arguments; the other arguments will always agree. 
*/ || !comp_template_args_porder (INNERMOST_TEMPLATE_ARGS (spec_args), INNERMOST_TEMPLATE_ARGS (args))) return NULL_TREE; /* Now that we have bindings for all of the template arguments, ensure that the arguments deduced for the template template parameters have compatible template parameter lists. See the use of template_template_parm_bindings_ok_p in fn_type_unification for more information. */ if (!template_template_parm_bindings_ok_p (tparms, deduced_args)) return NULL_TREE; return deduced_args; } // Compare two function templates T1 and T2 by deducing bindings // from one against the other. If both deductions succeed, compare // constraints to see which is more constrained. static int more_specialized_inst (tree t1, tree t2) { int fate = 0; int count = 0; if (get_bindings (t1, DECL_TEMPLATE_RESULT (t2), NULL_TREE, true)) { --fate; ++count; } if (get_bindings (t2, DECL_TEMPLATE_RESULT (t1), NULL_TREE, true)) { ++fate; ++count; } // If both deductions succeed, then one may be more constrained. if (count == 2 && fate == 0) fate = more_constrained (t1, t2); return fate; } /* TEMPLATES is a TREE_LIST. Each TREE_VALUE is a TEMPLATE_DECL. Return the TREE_LIST node with the most specialized template, if any. If there is no most specialized template, the error_mark_node is returned. Note that this function does not look at, or modify, the TREE_PURPOSE or TREE_TYPE of any of the nodes. Since the node returned is one of the elements of INSTANTIATIONS, callers may store information in the TREE_PURPOSE or TREE_TYPE of the nodes, and retrieve it from the value returned. */ tree most_specialized_instantiation (tree templates) { tree fn, champ; ++processing_template_decl; champ = templates; for (fn = TREE_CHAIN (templates); fn; fn = TREE_CHAIN (fn)) { gcc_assert (TREE_VALUE (champ) != TREE_VALUE (fn)); int fate = more_specialized_inst (TREE_VALUE (champ), TREE_VALUE (fn)); if (fate == -1) champ = fn; else if (!fate) { /* Equally specialized, move to next function. If there is no next function, nothing's most specialized. */ fn = TREE_CHAIN (fn); champ = fn; if (!fn) break; } } if (champ) /* Now verify that champ is better than everything earlier in the instantiation list. */ for (fn = templates; fn != champ; fn = TREE_CHAIN (fn)) { if (more_specialized_inst (TREE_VALUE (champ), TREE_VALUE (fn)) != 1) { champ = NULL_TREE; break; } } processing_template_decl--; if (!champ) return error_mark_node; return champ; } /* If DECL is a specialization of some template, return the most general such template. Otherwise, returns NULL_TREE. For example, given: template <class T> struct S { template <class U> void f(U); }; if TMPL is `template <class U> void S<int>::f(U)' this will return the full template. This function will not trace past partial specializations, however. For example, given in addition: template <class T> struct S<T*> { template <class U> void f(U); }; if TMPL is `template <class U> void S<int*>::f(U)' this will return `template <class T> template <class U> S<T*>::f(U)'. */ tree most_general_template (tree decl) { if (TREE_CODE (decl) != TEMPLATE_DECL) { if (tree tinfo = get_template_info (decl)) decl = TI_TEMPLATE (tinfo); /* The TI_TEMPLATE can be an IDENTIFIER_NODE for a template friend, or a FIELD_DECL for a capture pack. */ if (TREE_CODE (decl) != TEMPLATE_DECL) return NULL_TREE; } /* Look for more and more general templates. */ while (DECL_LANG_SPECIFIC (decl) && DECL_TEMPLATE_INFO (decl)) { /* The DECL_TI_TEMPLATE can be an IDENTIFIER_NODE in some cases. (See cp-tree.h for details.) 
*/ if (TREE_CODE (DECL_TI_TEMPLATE (decl)) != TEMPLATE_DECL) break; if (CLASS_TYPE_P (TREE_TYPE (decl)) && !TYPE_DECL_ALIAS_P (TYPE_NAME (TREE_TYPE (decl))) && CLASSTYPE_TEMPLATE_SPECIALIZATION (TREE_TYPE (decl))) break; /* Stop if we run into an explicitly specialized class template. */ if (!DECL_NAMESPACE_SCOPE_P (decl) && DECL_CONTEXT (decl) && CLASSTYPE_TEMPLATE_SPECIALIZATION (DECL_CONTEXT (decl))) break; decl = DECL_TI_TEMPLATE (decl); } return decl; } /* Return the most specialized of the template partial specializations which can produce TARGET, a specialization of some class or variable template. The value returned is actually a TREE_LIST; the TREE_VALUE is a TEMPLATE_DECL node corresponding to the partial specialization, while the TREE_PURPOSE is the set of template arguments that must be substituted into the template pattern in order to generate TARGET. If the choice of partial specialization is ambiguous, a diagnostic is issued, and the error_mark_node is returned. If there are no partial specializations matching TARGET, then NULL_TREE is returned, indicating that the primary template should be used. */ tree most_specialized_partial_spec (tree target, tsubst_flags_t complain) { tree list = NULL_TREE; tree t; tree champ; int fate; bool ambiguous_p; tree outer_args = NULL_TREE; tree tmpl, args; if (TYPE_P (target)) { tree tinfo = CLASSTYPE_TEMPLATE_INFO (target); tmpl = TI_TEMPLATE (tinfo); args = TI_ARGS (tinfo); } else if (TREE_CODE (target) == TEMPLATE_ID_EXPR) { tmpl = TREE_OPERAND (target, 0); args = TREE_OPERAND (target, 1); } else if (VAR_P (target)) { tree tinfo = DECL_TEMPLATE_INFO (target); tmpl = TI_TEMPLATE (tinfo); args = TI_ARGS (tinfo); } else gcc_unreachable (); tree main_tmpl = most_general_template (tmpl); /* For determining which partial specialization to use, only the innermost args are interesting. */ if (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (args)) { outer_args = strip_innermost_template_args (args, 1); args = INNERMOST_TEMPLATE_ARGS (args); } /* The caller hasn't called push_to_top_level yet, but we need get_partial_spec_bindings to be done in non-template context so that we'll fully resolve everything. */ processing_template_decl_sentinel ptds; for (t = DECL_TEMPLATE_SPECIALIZATIONS (main_tmpl); t; t = TREE_CHAIN (t)) { const tree ospec_tmpl = TREE_VALUE (t); tree spec_tmpl; if (outer_args) { /* Substitute in the template args from the enclosing class. */ ++processing_template_decl; spec_tmpl = tsubst (ospec_tmpl, outer_args, tf_none, NULL_TREE); --processing_template_decl; if (spec_tmpl == error_mark_node) return error_mark_node; } else spec_tmpl = ospec_tmpl; tree spec_args = get_partial_spec_bindings (tmpl, spec_tmpl, args); if (spec_args) { if (outer_args) spec_args = add_to_template_args (outer_args, spec_args); /* Keep the candidate only if the constraints are satisfied, or if we're not compiling with concepts. */ if (!flag_concepts || constraints_satisfied_p (ospec_tmpl, spec_args)) { list = tree_cons (spec_args, ospec_tmpl, list); TREE_TYPE (list) = TREE_TYPE (t); } } } if (! list) return NULL_TREE; ambiguous_p = false; t = list; champ = t; t = TREE_CHAIN (t); for (; t; t = TREE_CHAIN (t)) { fate = more_specialized_partial_spec (tmpl, champ, t); if (fate == 1) ; else { if (fate == 0) { t = TREE_CHAIN (t); if (! 
t) { ambiguous_p = true; break; } } champ = t; } } if (!ambiguous_p) for (t = list; t && t != champ; t = TREE_CHAIN (t)) { fate = more_specialized_partial_spec (tmpl, champ, t); if (fate != 1) { ambiguous_p = true; break; } } if (ambiguous_p) { const char *str; char *spaces = NULL; if (!(complain & tf_error)) return error_mark_node; if (TYPE_P (target)) error ("ambiguous template instantiation for %q#T", target); else error ("ambiguous template instantiation for %q#D", target); str = ngettext ("candidate is:", "candidates are:", list_length (list)); for (t = list; t; t = TREE_CHAIN (t)) { tree subst = build_tree_list (TREE_VALUE (t), TREE_PURPOSE (t)); inform (DECL_SOURCE_LOCATION (TREE_VALUE (t)), "%s %#qS", spaces ? spaces : str, subst); spaces = spaces ? spaces : get_spaces (str); } free (spaces); return error_mark_node; } return champ; } /* Explicitly instantiate DECL. */ void do_decl_instantiation (tree decl, tree storage) { tree result = NULL_TREE; int extern_p = 0; if (!decl || decl == error_mark_node) /* An error occurred, for which grokdeclarator has already issued an appropriate message. */ return; else if (! DECL_LANG_SPECIFIC (decl)) { error ("explicit instantiation of non-template %q#D", decl); return; } else if (DECL_DECLARED_CONCEPT_P (decl)) { if (VAR_P (decl)) error ("explicit instantiation of variable concept %q#D", decl); else error ("explicit instantiation of function concept %q#D", decl); return; } bool var_templ = (DECL_TEMPLATE_INFO (decl) && variable_template_p (DECL_TI_TEMPLATE (decl))); if (VAR_P (decl) && !var_templ) { /* There is an asymmetry here in the way VAR_DECLs and FUNCTION_DECLs are handled by grokdeclarator. In the case of the latter, the DECL we get back will be marked as a template instantiation, and the appropriate DECL_TEMPLATE_INFO will be set up. This does not happen for VAR_DECLs so we do the lookup here. Probably, grokdeclarator should handle VAR_DECLs as it currently handles FUNCTION_DECLs. */ if (!DECL_CLASS_SCOPE_P (decl)) { error ("%qD is not a static data member of a class template", decl); return; } result = lookup_field (DECL_CONTEXT (decl), DECL_NAME (decl), 0, false); if (!result || !VAR_P (result)) { error ("no matching template for %qD found", decl); return; } if (!same_type_p (TREE_TYPE (result), TREE_TYPE (decl))) { error ("type %qT for explicit instantiation %qD does not match " "declared type %qT", TREE_TYPE (result), decl, TREE_TYPE (decl)); return; } } else if (TREE_CODE (decl) != FUNCTION_DECL && !var_templ) { error ("explicit instantiation of %q#D", decl); return; } else result = decl; /* Check for various error cases. Note that if the explicit instantiation is valid the RESULT will currently be marked as an *implicit* instantiation; DECL_EXPLICIT_INSTANTIATION is not set until we get here. */ if (DECL_TEMPLATE_SPECIALIZATION (result)) { /* DR 259 [temp.spec]. Both an explicit instantiation and a declaration of an explicit specialization shall not appear in a program unless the explicit instantiation follows a declaration of the explicit specialization. For a given set of template parameters, if an explicit instantiation of a template appears after a declaration of an explicit specialization for that template, the explicit instantiation has no effect. */ return; } else if (DECL_EXPLICIT_INSTANTIATION (result)) { /* [temp.spec] No program shall explicitly instantiate any template more than once. 
We check DECL_NOT_REALLY_EXTERN so as not to complain when the first instantiation was `extern' and the second is not, and EXTERN_P for the opposite case. */ if (DECL_NOT_REALLY_EXTERN (result) && !extern_p) permerror (input_location, "duplicate explicit instantiation of %q#D", result); /* If an "extern" explicit instantiation follows an ordinary explicit instantiation, the template is instantiated. */ if (extern_p) return; } else if (!DECL_IMPLICIT_INSTANTIATION (result)) { error ("no matching template for %qD found", result); return; } else if (!DECL_TEMPLATE_INFO (result)) { permerror (input_location, "explicit instantiation of non-template %q#D", result); return; } if (storage == NULL_TREE) ; else if (storage == ridpointers[(int) RID_EXTERN]) { if (cxx_dialect == cxx98) pedwarn (input_location, OPT_Wpedantic, "ISO C++ 1998 forbids the use of %<extern%> on explicit " "instantiations"); extern_p = 1; } else error ("storage class %qD applied to template instantiation", storage); check_explicit_instantiation_namespace (result); mark_decl_instantiated (result, extern_p); if (! extern_p) instantiate_decl (result, /*defer_ok=*/true, /*expl_inst_class_mem_p=*/false); } static void mark_class_instantiated (tree t, int extern_p) { SET_CLASSTYPE_EXPLICIT_INSTANTIATION (t); SET_CLASSTYPE_INTERFACE_KNOWN (t); CLASSTYPE_INTERFACE_ONLY (t) = extern_p; TYPE_DECL_SUPPRESS_DEBUG (TYPE_NAME (t)) = extern_p; if (! extern_p) { CLASSTYPE_DEBUG_REQUESTED (t) = 1; rest_of_type_compilation (t, 1); } } /* Called from do_type_instantiation through binding_table_foreach to do recursive instantiation for the type bound in ENTRY. */ static void bt_instantiate_type_proc (binding_entry entry, void *data) { tree storage = *(tree *) data; if (MAYBE_CLASS_TYPE_P (entry->type) && CLASSTYPE_TEMPLATE_INFO (entry->type) && !uses_template_parms (CLASSTYPE_TI_ARGS (entry->type))) do_type_instantiation (TYPE_MAIN_DECL (entry->type), storage, 0); } /* Perform an explicit instantiation of template class T. STORAGE, if non-null, is the RID for extern, inline or static. COMPLAIN is nonzero if this is called from the parser, zero if called recursively, since the standard is unclear (as detailed below). */ void do_type_instantiation (tree t, tree storage, tsubst_flags_t complain) { int extern_p = 0; int nomem_p = 0; int static_p = 0; int previous_instantiation_extern_p = 0; if (TREE_CODE (t) == TYPE_DECL) t = TREE_TYPE (t); if (! CLASS_TYPE_P (t) || ! CLASSTYPE_TEMPLATE_INFO (t)) { tree tmpl = (TYPE_TEMPLATE_INFO (t)) ? TYPE_TI_TEMPLATE (t) : NULL; if (tmpl) error ("explicit instantiation of non-class template %qD", tmpl); else error ("explicit instantiation of non-template type %qT", t); return; } complete_type (t); if (!COMPLETE_TYPE_P (t)) { if (complain & tf_error) error ("explicit instantiation of %q#T before definition of template", t); return; } if (storage != NULL_TREE) { if (storage == ridpointers[(int) RID_EXTERN]) { if (cxx_dialect == cxx98) pedwarn (input_location, OPT_Wpedantic, "ISO C++ 1998 forbids the use of %<extern%> on " "explicit instantiations"); } else pedwarn (input_location, OPT_Wpedantic, "ISO C++ forbids the use of %qE" " on explicit instantiations", storage); if (storage == ridpointers[(int) RID_INLINE]) nomem_p = 1; else if (storage == ridpointers[(int) RID_EXTERN]) extern_p = 1; else if (storage == ridpointers[(int) RID_STATIC]) static_p = 1; else { error ("storage class %qD applied to template instantiation", storage); extern_p = 0; } } if (CLASSTYPE_TEMPLATE_SPECIALIZATION (t)) { /* DR 259 [temp.spec]. 
Both an explicit instantiation and a declaration of an explicit specialization shall not appear in a program unless the explicit instantiation follows a declaration of the explicit specialization. For a given set of template parameters, if an explicit instantiation of a template appears after a declaration of an explicit specialization for that template, the explicit instantiation has no effect. */ return; } else if (CLASSTYPE_EXPLICIT_INSTANTIATION (t)) { /* [temp.spec] No program shall explicitly instantiate any template more than once. If PREVIOUS_INSTANTIATION_EXTERN_P, then the first explicit instantiation was `extern'. If EXTERN_P then the second is. These cases are OK. */ previous_instantiation_extern_p = CLASSTYPE_INTERFACE_ONLY (t); if (!previous_instantiation_extern_p && !extern_p && (complain & tf_error)) permerror (input_location, "duplicate explicit instantiation of %q#T", t); /* If we've already instantiated the template, just return now. */ if (!CLASSTYPE_INTERFACE_ONLY (t)) return; } check_explicit_instantiation_namespace (TYPE_NAME (t)); mark_class_instantiated (t, extern_p); if (nomem_p) return; /* In contrast to implicit instantiation, where only the declarations, and not the definitions, of members are instantiated, we have here: [temp.explicit] An explicit instantiation that names a class template specialization is also an explicit instantiation of the same kind (declaration or definition) of each of its members (not including members inherited from base classes and members that are templates) that has not been previously explicitly specialized in the translation unit containing the explicit instantiation, provided that the associated constraints, if any, of that member are satisfied by the template arguments of the explicit instantiation. */ for (tree fld = TYPE_FIELDS (t); fld; fld = DECL_CHAIN (fld)) if ((VAR_P (fld) || (TREE_CODE (fld) == FUNCTION_DECL && !static_p && user_provided_p (fld))) && DECL_TEMPLATE_INSTANTIATION (fld) && constraints_satisfied_p (fld)) { mark_decl_instantiated (fld, extern_p); if (! extern_p) instantiate_decl (fld, /*defer_ok=*/true, /*expl_inst_class_mem_p=*/true); } if (CLASSTYPE_NESTED_UTDS (t)) binding_table_foreach (CLASSTYPE_NESTED_UTDS (t), bt_instantiate_type_proc, &storage); } /* Given a function DECL, which is a specialization of TMPL, modify DECL to be a re-instantiation of TMPL with the same template arguments. TMPL should be the template into which tsubst'ing should occur for DECL, not the most general template. One reason for doing this is a scenario like this: template <class T> void f(const T&, int i); void g() { f(3, 7); } template <class T> void f(const T& t, const int i) { } Note that when the template is first instantiated, with instantiate_template, the resulting DECL will have no name for the first parameter, and the wrong type for the second. So, when we go to instantiate the DECL, we regenerate it. */ static void regenerate_decl_from_template (tree decl, tree tmpl, tree args) { /* The arguments used to instantiate DECL, from the most general template. */ tree code_pattern; code_pattern = DECL_TEMPLATE_RESULT (tmpl); /* Make sure that we can see identifiers, and compute access correctly. 
*/ push_access_scope (decl); if (TREE_CODE (decl) == FUNCTION_DECL) { tree decl_parm; tree pattern_parm; tree specs; int args_depth; int parms_depth; args_depth = TMPL_ARGS_DEPTH (args); parms_depth = TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (tmpl)); if (args_depth > parms_depth) args = get_innermost_template_args (args, parms_depth); /* Instantiate a dynamic exception-specification. noexcept will be handled below. */ if (tree raises = TYPE_RAISES_EXCEPTIONS (TREE_TYPE (code_pattern))) if (TREE_VALUE (raises)) { specs = tsubst_exception_specification (TREE_TYPE (code_pattern), args, tf_error, NULL_TREE, /*defer_ok*/false); if (specs && specs != error_mark_node) TREE_TYPE (decl) = build_exception_variant (TREE_TYPE (decl), specs); } /* Merge parameter declarations. */ decl_parm = skip_artificial_parms_for (decl, DECL_ARGUMENTS (decl)); pattern_parm = skip_artificial_parms_for (code_pattern, DECL_ARGUMENTS (code_pattern)); while (decl_parm && !DECL_PACK_P (pattern_parm)) { tree parm_type; tree attributes; if (DECL_NAME (decl_parm) != DECL_NAME (pattern_parm)) DECL_NAME (decl_parm) = DECL_NAME (pattern_parm); parm_type = tsubst (TREE_TYPE (pattern_parm), args, tf_error, NULL_TREE); parm_type = type_decays_to (parm_type); if (!same_type_p (TREE_TYPE (decl_parm), parm_type)) TREE_TYPE (decl_parm) = parm_type; attributes = DECL_ATTRIBUTES (pattern_parm); if (DECL_ATTRIBUTES (decl_parm) != attributes) { DECL_ATTRIBUTES (decl_parm) = attributes; cplus_decl_attributes (&decl_parm, attributes, /*flags=*/0); } decl_parm = DECL_CHAIN (decl_parm); pattern_parm = DECL_CHAIN (pattern_parm); } /* Merge any parameters that match with the function parameter pack. */ if (pattern_parm && DECL_PACK_P (pattern_parm)) { int i, len; tree expanded_types; /* Expand the TYPE_PACK_EXPANSION that provides the types for the parameters in this function parameter pack. */ expanded_types = tsubst_pack_expansion (TREE_TYPE (pattern_parm), args, tf_error, NULL_TREE); len = TREE_VEC_LENGTH (expanded_types); for (i = 0; i < len; i++) { tree parm_type; tree attributes; if (DECL_NAME (decl_parm) != DECL_NAME (pattern_parm)) /* Rename the parameter to include the index. */ DECL_NAME (decl_parm) = make_ith_pack_parameter_name (DECL_NAME (pattern_parm), i); parm_type = TREE_VEC_ELT (expanded_types, i); parm_type = type_decays_to (parm_type); if (!same_type_p (TREE_TYPE (decl_parm), parm_type)) TREE_TYPE (decl_parm) = parm_type; attributes = DECL_ATTRIBUTES (pattern_parm); if (DECL_ATTRIBUTES (decl_parm) != attributes) { DECL_ATTRIBUTES (decl_parm) = attributes; cplus_decl_attributes (&decl_parm, attributes, /*flags=*/0); } decl_parm = DECL_CHAIN (decl_parm); } } /* Merge additional specifiers from the CODE_PATTERN. */ if (DECL_DECLARED_INLINE_P (code_pattern) && !DECL_DECLARED_INLINE_P (decl)) DECL_DECLARED_INLINE_P (decl) = 1; maybe_instantiate_noexcept (decl, tf_error); } else if (VAR_P (decl)) { start_lambda_scope (decl); DECL_INITIAL (decl) = tsubst_init (DECL_INITIAL (code_pattern), decl, args, tf_error, DECL_TI_TEMPLATE (decl)); finish_lambda_scope (); if (VAR_HAD_UNKNOWN_BOUND (decl)) TREE_TYPE (decl) = tsubst (TREE_TYPE (code_pattern), args, tf_error, DECL_TI_TEMPLATE (decl)); } else gcc_unreachable (); pop_access_scope (decl); } /* Return the TEMPLATE_DECL into which DECL_TI_ARGS(DECL) should be substituted to get DECL. */ tree template_for_substitution (tree decl) { tree tmpl = DECL_TI_TEMPLATE (decl); /* Set TMPL to the template whose DECL_TEMPLATE_RESULT is the pattern for the instantiation. 
This is not always the most general template. Consider, for example: template <class T> struct S { template <class U> void f(); template <> void f<int>(); }; and an instantiation of S<double>::f<int>. We want TD to be the specialization S<T>::f<int>, not the more general S<T>::f<U>. */ while (/* An instantiation cannot have a definition, so we need a more general template. */ DECL_TEMPLATE_INSTANTIATION (tmpl) /* We must also deal with friend templates. Given: template <class T> struct S { template <class U> friend void f() {}; }; S<int>::f<U> say, is not an instantiation of S<T>::f<U>, so far as the language is concerned, but that's still where we get the pattern for the instantiation from. On other hand, if the definition comes outside the class, say: template <class T> struct S { template <class U> friend void f(); }; template <class U> friend void f() {} we don't need to look any further. That's what the check for DECL_INITIAL is for. */ || (TREE_CODE (decl) == FUNCTION_DECL && DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (tmpl) && !DECL_INITIAL (DECL_TEMPLATE_RESULT (tmpl)))) { /* The present template, TD, should not be a definition. If it were a definition, we should be using it! Note that we cannot restructure the loop to just keep going until we find a template with a definition, since that might go too far if a specialization was declared, but not defined. */ /* Fetch the more general template. */ tmpl = DECL_TI_TEMPLATE (tmpl); } return tmpl; } /* Returns true if we need to instantiate this template instance even if we know we aren't going to emit it. */ bool always_instantiate_p (tree decl) { /* We always instantiate inline functions so that we can inline them. An explicit instantiation declaration prohibits implicit instantiation of non-inline functions. With high levels of optimization, we would normally inline non-inline functions -- but we're not allowed to do that for "extern template" functions. Therefore, we check DECL_DECLARED_INLINE_P, rather than possibly_inlined_p. */ return ((TREE_CODE (decl) == FUNCTION_DECL && (DECL_DECLARED_INLINE_P (decl) || type_uses_auto (TREE_TYPE (TREE_TYPE (decl))))) /* And we need to instantiate static data members so that their initializers are available in integral constant expressions. */ || (VAR_P (decl) && decl_maybe_constant_var_p (decl))); } /* If FN has a noexcept-specifier that hasn't been instantiated yet, instantiate it now, modifying TREE_TYPE (fn). Returns false on error, true otherwise. */ bool maybe_instantiate_noexcept (tree fn, tsubst_flags_t complain) { tree fntype, spec, noex, clone; if (fn == error_mark_node) return false; /* Don't instantiate a noexcept-specification from template context. */ if (processing_template_decl && (!flag_noexcept_type || type_dependent_expression_p (fn))) return true; if (DECL_MAYBE_DELETED (fn)) { if (fn == current_function_decl) /* We're in start_preparsed_function, keep going. */ return true; ++function_depth; synthesize_method (fn); --function_depth; return !DECL_MAYBE_DELETED (fn); } if (DECL_CLONED_FUNCTION_P (fn)) fn = DECL_CLONED_FUNCTION (fn); tree orig_fn = NULL_TREE; /* For a member friend template we can get a TEMPLATE_DECL. Let's use its FUNCTION_DECL for the rest of this function -- push_access_scope doesn't accept TEMPLATE_DECLs. 
*/ if (DECL_FUNCTION_TEMPLATE_P (fn)) { orig_fn = fn; fn = DECL_TEMPLATE_RESULT (fn); } fntype = TREE_TYPE (fn); spec = TYPE_RAISES_EXCEPTIONS (fntype); if (!spec || !TREE_PURPOSE (spec)) return true; noex = TREE_PURPOSE (spec); if (TREE_CODE (noex) == DEFERRED_NOEXCEPT) { static hash_set<tree>* fns = new hash_set<tree>; bool added = false; if (DEFERRED_NOEXCEPT_PATTERN (noex) == NULL_TREE) { spec = get_defaulted_eh_spec (fn, complain); if (spec == error_mark_node) /* This might have failed because of an unparsed DMI, so let's try again later. */ return false; } else if (!(added = !fns->add (fn))) { /* If hash_set::add returns true, the element was already there. */ location_t loc = cp_expr_loc_or_loc (DEFERRED_NOEXCEPT_PATTERN (noex), DECL_SOURCE_LOCATION (fn)); error_at (loc, "exception specification of %qD depends on itself", fn); spec = noexcept_false_spec; } else if (push_tinst_level (fn)) { push_to_top_level (); push_access_scope (fn); push_deferring_access_checks (dk_no_deferred); input_location = DECL_SOURCE_LOCATION (fn); /* If needed, set current_class_ptr for the benefit of tsubst_copy/PARM_DECL. */ tree tdecl = DECL_TEMPLATE_RESULT (DECL_TI_TEMPLATE (fn)); if (DECL_NONSTATIC_MEMBER_FUNCTION_P (tdecl)) { tree this_parm = DECL_ARGUMENTS (tdecl); current_class_ptr = NULL_TREE; current_class_ref = cp_build_fold_indirect_ref (this_parm); current_class_ptr = this_parm; } /* If this function is represented by a TEMPLATE_DECL, then the deferred noexcept-specification might still contain dependent types, even after substitution. And we need the dependency check functions to work in build_noexcept_spec. */ if (orig_fn) ++processing_template_decl; /* Do deferred instantiation of the noexcept-specifier. */ noex = tsubst_copy_and_build (DEFERRED_NOEXCEPT_PATTERN (noex), DEFERRED_NOEXCEPT_ARGS (noex), tf_warning_or_error, fn, /*function_p=*/false, /*i_c_e_p=*/true); /* Build up the noexcept-specification. */ spec = build_noexcept_spec (noex, tf_warning_or_error); if (orig_fn) --processing_template_decl; pop_deferring_access_checks (); pop_access_scope (fn); pop_tinst_level (); pop_from_top_level (); } else spec = noexcept_false_spec; if (added) fns->remove (fn); if (spec == error_mark_node) { /* This failed with a hard error, so let's go with false. */ gcc_assert (seen_error ()); spec = noexcept_false_spec; } TREE_TYPE (fn) = build_exception_variant (fntype, spec); if (orig_fn) TREE_TYPE (orig_fn) = TREE_TYPE (fn); } FOR_EACH_CLONE (clone, fn) { if (TREE_TYPE (clone) == fntype) TREE_TYPE (clone) = TREE_TYPE (fn); else TREE_TYPE (clone) = build_exception_variant (TREE_TYPE (clone), spec); } return true; } /* We're starting to process the function INST, an instantiation of PATTERN; add their parameters to local_specializations. */ static void register_parameter_specializations (tree pattern, tree inst) { tree tmpl_parm = DECL_ARGUMENTS (pattern); tree spec_parm = DECL_ARGUMENTS (inst); if (DECL_NONSTATIC_MEMBER_FUNCTION_P (inst)) { register_local_specialization (spec_parm, tmpl_parm); spec_parm = skip_artificial_parms_for (inst, spec_parm); tmpl_parm = skip_artificial_parms_for (pattern, tmpl_parm); } for (; tmpl_parm; tmpl_parm = DECL_CHAIN (tmpl_parm)) { if (!DECL_PACK_P (tmpl_parm)) { register_local_specialization (spec_parm, tmpl_parm); spec_parm = DECL_CHAIN (spec_parm); } else { /* Register the (value) argument pack as a specialization of TMPL_PARM, then move on. 
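For instance (a rough illustration; the names are made up), for a pattern template <class... A> void f (A... a); instantiated as f<int, double>, the remaining parameters of INST are gathered into an argument pack that is recorded as the local specialization of the pack parameter `a'. 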
*/ tree argpack = extract_fnparm_pack (tmpl_parm, &spec_parm); register_local_specialization (argpack, tmpl_parm); } } gcc_assert (!spec_parm); } /* Produce the definition of D, a _DECL generated from a template. If DEFER_OK is true, then we don't have to actually do the instantiation now; we just have to do it sometime. Normally it is an error if this is an explicit instantiation but D is undefined. EXPL_INST_CLASS_MEM_P is true iff D is a member of an explicitly instantiated class template. */ tree instantiate_decl (tree d, bool defer_ok, bool expl_inst_class_mem_p) { tree tmpl = DECL_TI_TEMPLATE (d); tree gen_args; tree args; tree td; tree code_pattern; tree spec; tree gen_tmpl; bool pattern_defined; location_t saved_loc = input_location; int saved_unevaluated_operand = cp_unevaluated_operand; int saved_inhibit_evaluation_warnings = c_inhibit_evaluation_warnings; bool external_p; bool deleted_p; /* This function should only be used to instantiate templates for functions and static member variables. */ gcc_assert (VAR_OR_FUNCTION_DECL_P (d)); /* A concept is never instantiated. */ gcc_assert (!DECL_DECLARED_CONCEPT_P (d)); /* Variables are never deferred; if instantiation is required, they are instantiated right away. That allows for better code in the case that an expression refers to the value of the variable -- if the variable has a constant value the referring expression can take advantage of that fact. */ if (VAR_P (d)) defer_ok = false; /* Don't instantiate cloned functions. Instead, instantiate the functions they cloned. */ if (TREE_CODE (d) == FUNCTION_DECL && DECL_CLONED_FUNCTION_P (d)) d = DECL_CLONED_FUNCTION (d); if (DECL_TEMPLATE_INSTANTIATED (d) || (TREE_CODE (d) == FUNCTION_DECL && DECL_DEFAULTED_FN (d) && DECL_INITIAL (d)) || DECL_TEMPLATE_SPECIALIZATION (d)) /* D has already been instantiated or explicitly specialized, so there's nothing for us to do here. It might seem reasonable to check whether or not D is an explicit instantiation, and, if so, stop here. But when an explicit instantiation is deferred until the end of the compilation, DECL_EXPLICIT_INSTANTIATION is set, even though we still need to do the instantiation. */ return d; /* Check to see whether we know that this template will be instantiated in some other file, as with "extern template" extension. */ external_p = (DECL_INTERFACE_KNOWN (d) && DECL_REALLY_EXTERN (d)); /* In general, we do not instantiate such templates. */ if (external_p && !always_instantiate_p (d)) return d; gen_tmpl = most_general_template (tmpl); gen_args = DECL_TI_ARGS (d); if (tmpl != gen_tmpl) /* We should already have the extra args. */ gcc_assert (TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (gen_tmpl)) == TMPL_ARGS_DEPTH (gen_args)); /* And what's in the hash table should match D. */ gcc_assert ((spec = retrieve_specialization (gen_tmpl, gen_args, 0)) == d || spec == NULL_TREE); /* This needs to happen before any tsubsting. */ if (! push_tinst_level (d)) return d; timevar_push (TV_TEMPLATE_INST); /* Set TD to the template whose DECL_TEMPLATE_RESULT is the pattern for the instantiation. */ td = template_for_substitution (d); args = gen_args; if (VAR_P (d)) { /* Look up an explicit specialization, if any. 
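     For example (an added sketch, not from the original sources), given

       template <class T> constexpr bool is_ptr = false;
       template <class T> constexpr bool is_ptr<T*> = true;

     instantiating is_ptr<int*> must substitute into the pattern of the
     partial specialization rather than the primary template, which is
     why TD and ARGS may be replaced just below.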
*/ tree tid = lookup_template_variable (gen_tmpl, gen_args); tree elt = most_specialized_partial_spec (tid, tf_warning_or_error); if (elt && elt != error_mark_node) { td = TREE_VALUE (elt); args = TREE_PURPOSE (elt); } } code_pattern = DECL_TEMPLATE_RESULT (td); /* We should never be trying to instantiate a member of a class template or partial specialization. */ gcc_assert (d != code_pattern); if ((DECL_NAMESPACE_SCOPE_P (d) && !DECL_INITIALIZED_IN_CLASS_P (d)) || DECL_TEMPLATE_SPECIALIZATION (td)) /* In the case of a friend template whose definition is provided outside the class, we may have too many arguments. Drop the ones we don't need. The same is true for specializations. */ args = get_innermost_template_args (args, TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (td))); if (TREE_CODE (d) == FUNCTION_DECL) { deleted_p = DECL_DELETED_FN (code_pattern); pattern_defined = ((DECL_SAVED_TREE (code_pattern) != NULL_TREE && DECL_INITIAL (code_pattern) != error_mark_node) || DECL_DEFAULTED_FN (code_pattern) || deleted_p); } else { deleted_p = false; if (DECL_CLASS_SCOPE_P (code_pattern)) pattern_defined = ! DECL_IN_AGGR_P (code_pattern); else pattern_defined = ! DECL_EXTERNAL (code_pattern); } /* We may be in the middle of deferred access check. Disable it now. */ push_deferring_access_checks (dk_no_deferred); /* Unless an explicit instantiation directive has already determined the linkage of D, remember that a definition is available for this entity. */ if (pattern_defined && !DECL_INTERFACE_KNOWN (d) && !DECL_NOT_REALLY_EXTERN (d)) mark_definable (d); DECL_SOURCE_LOCATION (td) = DECL_SOURCE_LOCATION (code_pattern); DECL_SOURCE_LOCATION (d) = DECL_SOURCE_LOCATION (code_pattern); input_location = DECL_SOURCE_LOCATION (d); /* If D is a member of an explicitly instantiated class template, and no definition is available, treat it like an implicit instantiation. */ if (!pattern_defined && expl_inst_class_mem_p && DECL_EXPLICIT_INSTANTIATION (d)) { /* Leave linkage flags alone on instantiations with anonymous visibility. */ if (TREE_PUBLIC (d)) { DECL_NOT_REALLY_EXTERN (d) = 0; DECL_INTERFACE_KNOWN (d) = 0; } SET_DECL_IMPLICIT_INSTANTIATION (d); } /* Defer all other templates, unless we have been explicitly forbidden from doing so. */ if (/* If there is no definition, we cannot instantiate the template. */ ! pattern_defined /* If it's OK to postpone instantiation, do so. */ || defer_ok /* If this is a static data member that will be defined elsewhere, we don't want to instantiate the entire data member, but we do want to instantiate the initializer so that we can substitute that elsewhere. */ || (external_p && VAR_P (d)) /* Handle here a deleted function too, avoid generating its body (c++/61080). */ || deleted_p) { /* The definition of the static data member is now required so we must substitute the initializer. */ if (VAR_P (d) && !DECL_INITIAL (d) && DECL_INITIAL (code_pattern)) { tree ns; tree init; bool const_init = false; bool enter_context = DECL_CLASS_SCOPE_P (d); ns = decl_namespace_context (d); push_nested_namespace (ns); if (enter_context) push_nested_class (DECL_CONTEXT (d)); init = tsubst_expr (DECL_INITIAL (code_pattern), args, tf_warning_or_error, NULL_TREE, /*integral_constant_expression_p=*/false); /* If instantiating the initializer involved instantiating this again, don't call cp_finish_decl twice. */ if (!DECL_INITIAL (d)) { /* Make sure the initializer is still constant, in case of circular dependency (template/instantiate6.C). 
*/ const_init = DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (code_pattern); cp_finish_decl (d, init, /*init_const_expr_p=*/const_init, /*asmspec_tree=*/NULL_TREE, LOOKUP_ONLYCONVERTING); } if (enter_context) pop_nested_class (); pop_nested_namespace (ns); } /* We restore the source position here because it's used by add_pending_template. */ input_location = saved_loc; if (at_eof && !pattern_defined && DECL_EXPLICIT_INSTANTIATION (d) && DECL_NOT_REALLY_EXTERN (d)) /* [temp.explicit] The definition of a non-exported function template, a non-exported member function template, or a non-exported member function or static data member of a class template shall be present in every translation unit in which it is explicitly instantiated. */ permerror (input_location, "explicit instantiation of %qD " "but no definition available", d); /* If we're in unevaluated context, we just wanted to get the constant value; this isn't an odr use, so don't queue a full instantiation. */ if (cp_unevaluated_operand != 0) goto out; /* ??? Historically, we have instantiated inline functions, even when marked as "extern template". */ if (!(external_p && VAR_P (d))) add_pending_template (d); goto out; } bool push_to_top, nested; tree fn_context; fn_context = decl_function_context (d); if (LAMBDA_FUNCTION_P (d)) /* tsubst_lambda_expr resolved any references to enclosing functions. */ fn_context = NULL_TREE; nested = current_function_decl != NULL_TREE; push_to_top = !(nested && fn_context == current_function_decl); vec<tree> omp_privatization_save; if (nested) save_omp_privatization_clauses (omp_privatization_save); if (push_to_top) push_to_top_level (); else { gcc_assert (!processing_template_decl); push_function_context (); cp_unevaluated_operand = 0; c_inhibit_evaluation_warnings = 0; } if (VAR_P (d)) { /* The variable might be a lambda's extra scope, and that lambda's visibility depends on D's. */ maybe_commonize_var (d); determine_visibility (d); } /* Mark D as instantiated so that recursive calls to instantiate_decl do not try to instantiate it again. */ DECL_TEMPLATE_INSTANTIATED (d) = 1; /* Regenerate the declaration in case the template has been modified by a subsequent redeclaration. */ regenerate_decl_from_template (d, td, args); /* We already set the file and line above. Reset them now in case they changed as a result of calling regenerate_decl_from_template. */ input_location = DECL_SOURCE_LOCATION (d); if (VAR_P (d)) { tree init; bool const_init = false; /* Clear out DECL_RTL; whatever was there before may not be right since we've reset the type of the declaration. */ SET_DECL_RTL (d, NULL); DECL_IN_AGGR_P (d) = 0; /* The initializer is placed in DECL_INITIAL by regenerate_decl_from_template so we don't need to push/pop_access_scope again here. Pull it out so that cp_finish_decl can process it. */ init = DECL_INITIAL (d); DECL_INITIAL (d) = NULL_TREE; DECL_INITIALIZED_P (d) = 0; /* Clear DECL_EXTERNAL so that cp_finish_decl will process the initializer. That function will defer actual emission until we have a chance to determine linkage. */ DECL_EXTERNAL (d) = 0; /* Enter the scope of D so that access-checking works correctly. */ bool enter_context = DECL_CLASS_SCOPE_P (d); if (enter_context) push_nested_class (DECL_CONTEXT (d)); const_init = DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (code_pattern); int flags = (TINFO_VAR_DECLARED_CONSTINIT (DECL_TEMPLATE_INFO (d)) ? 
LOOKUP_CONSTINIT : 0); cp_finish_decl (d, init, const_init, NULL_TREE, flags); if (enter_context) pop_nested_class (); if (variable_template_p (gen_tmpl)) note_variable_template_instantiation (d); } else if (TREE_CODE (d) == FUNCTION_DECL && DECL_DEFAULTED_FN (code_pattern)) synthesize_method (d); else if (TREE_CODE (d) == FUNCTION_DECL) { /* Set up the list of local specializations. */ local_specialization_stack lss (push_to_top ? lss_blank : lss_copy); tree block = NULL_TREE; /* Set up context. */ if (DECL_OMP_DECLARE_REDUCTION_P (code_pattern) && TREE_CODE (DECL_CONTEXT (code_pattern)) == FUNCTION_DECL) block = push_stmt_list (); else start_preparsed_function (d, NULL_TREE, SF_PRE_PARSED); /* Some typedefs referenced from within the template code need to be access checked at template instantiation time, i.e now. These types were added to the template at parsing time. Let's get those and perform the access checks then. */ perform_typedefs_access_check (DECL_TEMPLATE_RESULT (td), args); /* Create substitution entries for the parameters. */ register_parameter_specializations (code_pattern, d); /* Substitute into the body of the function. */ if (DECL_OMP_DECLARE_REDUCTION_P (code_pattern)) tsubst_omp_udr (DECL_SAVED_TREE (code_pattern), args, tf_warning_or_error, tmpl); else { tsubst_expr (DECL_SAVED_TREE (code_pattern), args, tf_warning_or_error, tmpl, /*integral_constant_expression_p=*/false); /* Set the current input_location to the end of the function so that finish_function knows where we are. */ input_location = DECL_STRUCT_FUNCTION (code_pattern)->function_end_locus; /* Remember if we saw an infinite loop in the template. */ current_function_infinite_loop = DECL_STRUCT_FUNCTION (code_pattern)->language->infinite_loop; } /* Finish the function. */ if (DECL_OMP_DECLARE_REDUCTION_P (code_pattern) && TREE_CODE (DECL_CONTEXT (code_pattern)) == FUNCTION_DECL) DECL_SAVED_TREE (d) = pop_stmt_list (block); else { d = finish_function (/*inline_p=*/false); expand_or_defer_fn (d); } if (DECL_OMP_DECLARE_REDUCTION_P (code_pattern)) cp_check_omp_declare_reduction (d); } /* We're not deferring instantiation any more. */ TI_PENDING_TEMPLATE_FLAG (DECL_TEMPLATE_INFO (d)) = 0; if (push_to_top) pop_from_top_level (); else pop_function_context (); if (nested) restore_omp_privatization_clauses (omp_privatization_save); out: pop_deferring_access_checks (); timevar_pop (TV_TEMPLATE_INST); pop_tinst_level (); input_location = saved_loc; cp_unevaluated_operand = saved_unevaluated_operand; c_inhibit_evaluation_warnings = saved_inhibit_evaluation_warnings; return d; } /* Run through the list of templates that we wish we could instantiate, and instantiate any we can. RETRIES is the number of times we retry pending template instantiation. */ void instantiate_pending_templates (int retries) { int reconsider; location_t saved_loc = input_location; /* Instantiating templates may trigger vtable generation. This in turn may require further template instantiations. We place a limit here to avoid infinite loop. */ if (pending_templates && retries >= max_tinst_depth) { tree decl = pending_templates->tinst->maybe_get_node (); fatal_error (input_location, "template instantiation depth exceeds maximum of %d" " instantiating %q+D, possibly from virtual table generation" " (use %<-ftemplate-depth=%> to increase the maximum)", max_tinst_depth, decl); if (TREE_CODE (decl) == FUNCTION_DECL) /* Pretend that we defined it. 
*/ DECL_INITIAL (decl) = error_mark_node; return; } do { struct pending_template **t = &pending_templates; struct pending_template *last = NULL; reconsider = 0; while (*t) { tree instantiation = reopen_tinst_level ((*t)->tinst); bool complete = false; if (TYPE_P (instantiation)) { if (!COMPLETE_TYPE_P (instantiation)) { instantiate_class_template (instantiation); if (CLASSTYPE_TEMPLATE_INSTANTIATION (instantiation)) for (tree fld = TYPE_FIELDS (instantiation); fld; fld = TREE_CHAIN (fld)) if ((VAR_P (fld) || (TREE_CODE (fld) == FUNCTION_DECL && !DECL_ARTIFICIAL (fld))) && DECL_TEMPLATE_INSTANTIATION (fld)) instantiate_decl (fld, /*defer_ok=*/false, /*expl_inst_class_mem_p=*/false); if (COMPLETE_TYPE_P (instantiation)) reconsider = 1; } complete = COMPLETE_TYPE_P (instantiation); } else { if (!DECL_TEMPLATE_SPECIALIZATION (instantiation) && !DECL_TEMPLATE_INSTANTIATED (instantiation)) { instantiation = instantiate_decl (instantiation, /*defer_ok=*/false, /*expl_inst_class_mem_p=*/false); if (DECL_TEMPLATE_INSTANTIATED (instantiation)) reconsider = 1; } complete = (DECL_TEMPLATE_SPECIALIZATION (instantiation) || DECL_TEMPLATE_INSTANTIATED (instantiation)); } if (complete) { /* If INSTANTIATION has been instantiated, then we don't need to consider it again in the future. */ struct pending_template *drop = *t; *t = (*t)->next; set_refcount_ptr (drop->tinst); pending_template_freelist ().free (drop); } else { last = *t; t = &(*t)->next; } tinst_depth = 0; set_refcount_ptr (current_tinst_level); } last_pending_template = last; } while (reconsider); input_location = saved_loc; } /* Substitute ARGVEC into T, which is a list of initializers for either base class or a non-static data member. The TREE_PURPOSEs are DECLs, and the TREE_VALUEs are the initializer values. Used by instantiate_decl. */ static tree tsubst_initializer_list (tree t, tree argvec) { tree inits = NULL_TREE; tree target_ctor = error_mark_node; for (; t; t = TREE_CHAIN (t)) { tree decl; tree init; tree expanded_bases = NULL_TREE; tree expanded_arguments = NULL_TREE; int i, len = 1; if (TREE_CODE (TREE_PURPOSE (t)) == TYPE_PACK_EXPANSION) { tree expr; tree arg; /* Expand the base class expansion type into separate base classes. */ expanded_bases = tsubst_pack_expansion (TREE_PURPOSE (t), argvec, tf_warning_or_error, NULL_TREE); if (expanded_bases == error_mark_node) continue; /* We'll be building separate TREE_LISTs of arguments for each base. */ len = TREE_VEC_LENGTH (expanded_bases); expanded_arguments = make_tree_vec (len); for (i = 0; i < len; i++) TREE_VEC_ELT (expanded_arguments, i) = NULL_TREE; /* Build a dummy EXPR_PACK_EXPANSION that will be used to expand each argument in the TREE_VALUE of t. */ expr = make_node (EXPR_PACK_EXPANSION); PACK_EXPANSION_LOCAL_P (expr) = true; PACK_EXPANSION_PARAMETER_PACKS (expr) = PACK_EXPANSION_PARAMETER_PACKS (TREE_PURPOSE (t)); if (TREE_VALUE (t) == void_type_node) /* VOID_TYPE_NODE is used to indicate value-initialization. */ { for (i = 0; i < len; i++) TREE_VEC_ELT (expanded_arguments, i) = void_type_node; } else { /* Substitute parameter packs into each argument in the TREE_LIST. */ in_base_initializer = 1; for (arg = TREE_VALUE (t); arg; arg = TREE_CHAIN (arg)) { tree expanded_exprs; /* Expand the argument. 
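        As an added illustration (not from the original sources, names
        invented): for a pattern along the lines of

          template <class... Bases> struct D : Bases...
          {
            D (int i) : Bases (i)... { }
          };

        instantiating D with two bases, say D<A, B>, expands the single
        mem-initializer Bases (i)... into one initializer per base, so
        each argument is expanded here once per element of the pack.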
*/ SET_PACK_EXPANSION_PATTERN (expr, TREE_VALUE (arg)); expanded_exprs = tsubst_pack_expansion (expr, argvec, tf_warning_or_error, NULL_TREE); if (expanded_exprs == error_mark_node) continue; /* Prepend each of the expanded expressions to the corresponding TREE_LIST in EXPANDED_ARGUMENTS. */ for (i = 0; i < len; i++) { TREE_VEC_ELT (expanded_arguments, i) = tree_cons (NULL_TREE, TREE_VEC_ELT (expanded_exprs, i), TREE_VEC_ELT (expanded_arguments, i)); } } in_base_initializer = 0; /* Reverse all of the TREE_LISTs in EXPANDED_ARGUMENTS, since we built them backwards. */ for (i = 0; i < len; i++) { TREE_VEC_ELT (expanded_arguments, i) = nreverse (TREE_VEC_ELT (expanded_arguments, i)); } } } for (i = 0; i < len; ++i) { if (expanded_bases) { decl = TREE_VEC_ELT (expanded_bases, i); decl = expand_member_init (decl); init = TREE_VEC_ELT (expanded_arguments, i); } else { tree tmp; decl = tsubst_copy (TREE_PURPOSE (t), argvec, tf_warning_or_error, NULL_TREE); decl = expand_member_init (decl); if (decl && !DECL_P (decl)) in_base_initializer = 1; init = TREE_VALUE (t); tmp = init; if (init != void_type_node) init = tsubst_expr (init, argvec, tf_warning_or_error, NULL_TREE, /*integral_constant_expression_p=*/false); if (init == NULL_TREE && tmp != NULL_TREE) /* If we had an initializer but it instantiated to nothing, value-initialize the object. This will only occur when the initializer was a pack expansion where the parameter packs used in that expansion were of length zero. */ init = void_type_node; in_base_initializer = 0; } if (target_ctor != error_mark_node && init != error_mark_node) { error ("mem-initializer for %qD follows constructor delegation", decl); return inits; } /* Look for a target constructor. */ if (init != error_mark_node && decl && CLASS_TYPE_P (decl) && same_type_p (decl, current_class_type)) { maybe_warn_cpp0x (CPP0X_DELEGATING_CTORS); if (inits) { error ("constructor delegation follows mem-initializer for %qD", TREE_PURPOSE (inits)); continue; } target_ctor = init; } if (decl) { init = build_tree_list (decl, init); TREE_CHAIN (init) = inits; inits = init; } } } return inits; } /* Set CURRENT_ACCESS_SPECIFIER based on the protection of DECL. */ static void set_current_access_from_decl (tree decl) { if (TREE_PRIVATE (decl)) current_access_specifier = access_private_node; else if (TREE_PROTECTED (decl)) current_access_specifier = access_protected_node; else current_access_specifier = access_public_node; } /* Instantiate an enumerated type. TAG is the template type, NEWTAG is the instantiation (which should have been created with start_enum) and ARGS are the template arguments to use. */ static void tsubst_enum (tree tag, tree newtag, tree args) { tree e; if (SCOPED_ENUM_P (newtag)) begin_scope (sk_scoped_enum, newtag); for (e = TYPE_VALUES (tag); e; e = TREE_CHAIN (e)) { tree value; tree decl; decl = TREE_VALUE (e); /* Note that in a template enum, the TREE_VALUE is the CONST_DECL, not the corresponding INTEGER_CST. */ value = tsubst_expr (DECL_INITIAL (decl), args, tf_warning_or_error, NULL_TREE, /*integral_constant_expression_p=*/true); /* Give this enumeration constant the correct access. */ set_current_access_from_decl (decl); /* Actually build the enumerator itself. Here we're assuming that enumerators can't have dependent attributes. 
*/ build_enumerator (DECL_NAME (decl), value, newtag, DECL_ATTRIBUTES (decl), DECL_SOURCE_LOCATION (decl)); } if (SCOPED_ENUM_P (newtag)) finish_scope (); finish_enum_value_list (newtag); finish_enum (newtag); DECL_SOURCE_LOCATION (TYPE_NAME (newtag)) = DECL_SOURCE_LOCATION (TYPE_NAME (tag)); } /* DECL is a FUNCTION_DECL that is a template specialization. Return its type -- but without substituting the innermost set of template arguments. So, innermost set of template parameters will appear in the type. */ tree get_mostly_instantiated_function_type (tree decl) { /* For a function, DECL_TI_TEMPLATE is partially instantiated. */ return TREE_TYPE (DECL_TI_TEMPLATE (decl)); } /* Return truthvalue if we're processing a template different from the last one involved in diagnostics. */ bool problematic_instantiation_changed (void) { return current_tinst_level != last_error_tinst_level; } /* Remember current template involved in diagnostics. */ void record_last_problematic_instantiation (void) { set_refcount_ptr (last_error_tinst_level, current_tinst_level); } struct tinst_level * current_instantiation (void) { return current_tinst_level; } /* Return TRUE if current_function_decl is being instantiated, false otherwise. */ bool instantiating_current_function_p (void) { return (current_instantiation () && (current_instantiation ()->maybe_get_node () == current_function_decl)); } /* [temp.param] Check that template non-type parm TYPE is of an allowable type. Return false for ok, true for disallowed. Issue error and inform messages under control of COMPLAIN. */ static bool invalid_nontype_parm_type_p (tree type, tsubst_flags_t complain) { if (INTEGRAL_OR_ENUMERATION_TYPE_P (type)) return false; else if (TYPE_PTR_P (type)) return false; else if (TYPE_REF_P (type) && !TYPE_REF_IS_RVALUE (type)) return false; else if (TYPE_PTRMEM_P (type)) return false; else if (TREE_CODE (type) == TEMPLATE_TYPE_PARM) { if (CLASS_PLACEHOLDER_TEMPLATE (type) && cxx_dialect < cxx2a) { if (complain & tf_error) error ("non-type template parameters of deduced class type only " "available with %<-std=c++2a%> or %<-std=gnu++2a%>"); return true; } return false; } else if (TREE_CODE (type) == TYPENAME_TYPE) return false; else if (TREE_CODE (type) == DECLTYPE_TYPE) return false; else if (TREE_CODE (type) == NULLPTR_TYPE) return false; /* A bound template template parm could later be instantiated to have a valid nontype parm type via an alias template. */ else if (cxx_dialect >= cxx11 && TREE_CODE (type) == BOUND_TEMPLATE_TEMPLATE_PARM) return false; else if (CLASS_TYPE_P (type)) { if (cxx_dialect < cxx2a) { if (complain & tf_error) error ("non-type template parameters of class type only available " "with %<-std=c++2a%> or %<-std=gnu++2a%>"); return true; } if (dependent_type_p (type)) return false; if (!complete_type_or_else (type, NULL_TREE)) return true; if (!structural_type_p (type)) { if (complain & tf_error) { auto_diagnostic_group d; error ("%qT is not a valid type for a template non-type " "parameter because it is not structural", type); structural_type_p (type, true); } return true; } return false; } if (complain & tf_error) { if (type == error_mark_node) inform (input_location, "invalid template non-type parameter"); else error ("%q#T is not a valid type for a template non-type parameter", type); } return true; } /* Returns TRUE if TYPE is dependent, in the sense of [temp.dep.type]. 
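   For example (added for exposition): inside a template with a type
   parameter T, the types T, T*, T (&)[3] and typename T::X are all
   dependent, whereas int and int (*)(int) are not.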
Assumes that TYPE really is a type, and not the ERROR_MARK_NODE.*/ static bool dependent_type_p_r (tree type) { tree scope; /* [temp.dep.type] A type is dependent if it is: -- a template parameter. Template template parameters are types for us (since TYPE_P holds true for them) so we handle them here. */ if (TREE_CODE (type) == TEMPLATE_TYPE_PARM || TREE_CODE (type) == TEMPLATE_TEMPLATE_PARM) return true; /* -- a qualified-id with a nested-name-specifier which contains a class-name that names a dependent type or whose unqualified-id names a dependent type. */ if (TREE_CODE (type) == TYPENAME_TYPE) return true; /* An alias template specialization can be dependent even if the resulting type is not. */ if (dependent_alias_template_spec_p (type, nt_transparent)) return true; /* -- a cv-qualified type where the cv-unqualified type is dependent. No code is necessary for this bullet; the code below handles cv-qualified types, and we don't want to strip aliases with TYPE_MAIN_VARIANT because of DR 1558. */ /* -- a compound type constructed from any dependent type. */ if (TYPE_PTRMEM_P (type)) return (dependent_type_p (TYPE_PTRMEM_CLASS_TYPE (type)) || dependent_type_p (TYPE_PTRMEM_POINTED_TO_TYPE (type))); else if (INDIRECT_TYPE_P (type)) return dependent_type_p (TREE_TYPE (type)); else if (FUNC_OR_METHOD_TYPE_P (type)) { tree arg_type; if (dependent_type_p (TREE_TYPE (type))) return true; for (arg_type = TYPE_ARG_TYPES (type); arg_type; arg_type = TREE_CHAIN (arg_type)) if (dependent_type_p (TREE_VALUE (arg_type))) return true; if (cxx_dialect >= cxx17) /* A value-dependent noexcept-specifier makes the type dependent. */ if (tree spec = TYPE_RAISES_EXCEPTIONS (type)) if (tree noex = TREE_PURPOSE (spec)) /* Treat DEFERRED_NOEXCEPT as non-dependent, since it doesn't affect overload resolution and treating it as dependent breaks things. Same for an unparsed noexcept expression. */ if (TREE_CODE (noex) != DEFERRED_NOEXCEPT && TREE_CODE (noex) != DEFERRED_PARSE && value_dependent_expression_p (noex)) return true; return false; } /* -- an array type constructed from any dependent type or whose size is specified by a constant expression that is value-dependent. We checked for type- and value-dependence of the bounds in compute_array_index_type, so TYPE_DEPENDENT_P is already set. */ if (TREE_CODE (type) == ARRAY_TYPE) { if (TYPE_DOMAIN (type) && dependent_type_p (TYPE_DOMAIN (type))) return true; return dependent_type_p (TREE_TYPE (type)); } /* -- a template-id in which either the template name is a template parameter ... */ if (TREE_CODE (type) == BOUND_TEMPLATE_TEMPLATE_PARM) return true; /* ... or any of the template arguments is a dependent type or an expression that is type-dependent or value-dependent. */ else if (CLASS_TYPE_P (type) && CLASSTYPE_TEMPLATE_INFO (type) && (any_dependent_template_arguments_p (INNERMOST_TEMPLATE_ARGS (CLASSTYPE_TI_ARGS (type))))) return true; /* All TYPEOF_TYPEs, DECLTYPE_TYPEs, and UNDERLYING_TYPEs are dependent; if the argument of the `typeof' expression is not type-dependent, then it should already been have resolved. */ if (TREE_CODE (type) == TYPEOF_TYPE || TREE_CODE (type) == DECLTYPE_TYPE || TREE_CODE (type) == UNDERLYING_TYPE) return true; /* A template argument pack is dependent if any of its packed arguments are. 
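   For example (added for exposition, names invented), given

     template <class...> struct list { };
     template <class... Us> struct wrap { typedef list<int, Us...> type; };

   the argument pack built for list<int, Us...> contains both int and the
   expansion Us..., and it is dependent because the latter is.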
*/ if (TREE_CODE (type) == TYPE_ARGUMENT_PACK) { tree args = ARGUMENT_PACK_ARGS (type); int i, len = TREE_VEC_LENGTH (args); for (i = 0; i < len; ++i) if (dependent_template_arg_p (TREE_VEC_ELT (args, i))) return true; } /* All TYPE_PACK_EXPANSIONs are dependent, because parameter packs must be template parameters. */ if (TREE_CODE (type) == TYPE_PACK_EXPANSION) return true; if (any_dependent_type_attributes_p (TYPE_ATTRIBUTES (type))) return true; /* The standard does not specifically mention types that are local to template functions or local classes, but they should be considered dependent too. For example: template <int I> void f() { enum E { a = I }; S<sizeof (E)> s; } The size of `E' cannot be known until the value of `I' has been determined. Therefore, `E' must be considered dependent. */ scope = TYPE_CONTEXT (type); if (scope && TYPE_P (scope)) return dependent_type_p (scope); /* Don't use type_dependent_expression_p here, as it can lead to infinite recursion trying to determine whether a lambda nested in a lambda is dependent (c++/47687). */ else if (scope && TREE_CODE (scope) == FUNCTION_DECL && DECL_LANG_SPECIFIC (scope) && DECL_TEMPLATE_INFO (scope) && (any_dependent_template_arguments_p (INNERMOST_TEMPLATE_ARGS (DECL_TI_ARGS (scope))))) return true; /* Other types are non-dependent. */ return false; } /* Returns TRUE if TYPE is dependent, in the sense of [temp.dep.type]. Note that a NULL type is considered dependent. */ bool dependent_type_p (tree type) { /* If there are no template parameters in scope, then there can't be any dependent types. */ if (!processing_template_decl) { /* If we are not processing a template, then nobody should be providing us with a dependent type. */ gcc_assert (type); gcc_assert (TREE_CODE (type) != TEMPLATE_TYPE_PARM || is_auto (type)); return false; } /* If the type is NULL, we have not computed a type for the entity in question; in that case, the type is dependent. */ if (!type) return true; /* Erroneous types can be considered non-dependent. */ if (type == error_mark_node) return false; /* Getting here with global_type_node means we improperly called this function on the TREE_TYPE of an IDENTIFIER_NODE. */ gcc_checking_assert (type != global_type_node); /* If we have not already computed the appropriate value for TYPE, do so now. */ if (!TYPE_DEPENDENT_P_VALID (type)) { TYPE_DEPENDENT_P (type) = dependent_type_p_r (type); TYPE_DEPENDENT_P_VALID (type) = 1; } return TYPE_DEPENDENT_P (type); } /* Returns TRUE if SCOPE is a dependent scope, in which we can't do any lookup. In other words, a dependent type that is not the current instantiation. */ bool dependent_scope_p (tree scope) { return (scope && TYPE_P (scope) && dependent_type_p (scope) && !currently_open_class (scope)); } /* T is a SCOPE_REF. Return whether it represents a non-static member of an unknown base of 'this' (and is therefore instantiation-dependent). */ static bool unknown_base_ref_p (tree t) { if (!current_class_ptr) return false; tree mem = TREE_OPERAND (t, 1); if (shared_member_p (mem)) return false; tree cur = current_nonlambda_class_type (); if (!any_dependent_bases_p (cur)) return false; tree ctx = TREE_OPERAND (t, 0); if (DERIVED_FROM_P (ctx, cur)) return false; return true; } /* T is a SCOPE_REF; return whether we need to consider it instantiation-dependent so that we can check access at instantiation time even though we know which member it resolves to. 
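   For example (an added sketch, with invented names):

     struct B { protected: int m; };
     template <class T> struct D : T
     {
       int f () { return B::m; }
     };

   B::m names a known member, but whether D<T>::f may access it depends
   on whether T turns out to derive from B, so the access check has to be
   repeated when D is instantiated.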
*/ static bool instantiation_dependent_scope_ref_p (tree t) { if (DECL_P (TREE_OPERAND (t, 1)) && CLASS_TYPE_P (TREE_OPERAND (t, 0)) && !unknown_base_ref_p (t) && accessible_in_template_p (TREE_OPERAND (t, 0), TREE_OPERAND (t, 1))) return false; else return true; } /* Returns TRUE if the EXPRESSION is value-dependent, in the sense of [temp.dep.constexpr]. EXPRESSION is already known to be a constant expression. */ /* Note that this predicate is not appropriate for general expressions; only constant expressions (that satisfy potential_constant_expression) can be tested for value dependence. */ bool value_dependent_expression_p (tree expression) { if (!processing_template_decl || expression == NULL_TREE) return false; /* A type-dependent expression is also value-dependent. */ if (type_dependent_expression_p (expression)) return true; switch (TREE_CODE (expression)) { case BASELINK: /* A dependent member function of the current instantiation. */ return dependent_type_p (BINFO_TYPE (BASELINK_BINFO (expression))); case FUNCTION_DECL: /* A dependent member function of the current instantiation. */ if (DECL_CLASS_SCOPE_P (expression) && dependent_type_p (DECL_CONTEXT (expression))) return true; break; case IDENTIFIER_NODE: /* A name that has not been looked up -- must be dependent. */ return true; case TEMPLATE_PARM_INDEX: /* A non-type template parm. */ return true; case CONST_DECL: /* A non-type template parm. */ if (DECL_TEMPLATE_PARM_P (expression)) return true; return value_dependent_expression_p (DECL_INITIAL (expression)); case VAR_DECL: /* A constant with literal type and is initialized with an expression that is value-dependent. */ if (DECL_DEPENDENT_INIT_P (expression) /* FIXME cp_finish_decl doesn't fold reference initializers. */ || TYPE_REF_P (TREE_TYPE (expression))) return true; if (DECL_HAS_VALUE_EXPR_P (expression)) { tree value_expr = DECL_VALUE_EXPR (expression); if (value_dependent_expression_p (value_expr) /* __PRETTY_FUNCTION__ inside a template function is dependent on the name of the function. */ || (DECL_PRETTY_FUNCTION_P (expression) /* It might be used in a template, but not a template function, in which case its DECL_VALUE_EXPR will be "top level". */ && value_expr == error_mark_node)) return true; } return false; case DYNAMIC_CAST_EXPR: case STATIC_CAST_EXPR: case CONST_CAST_EXPR: case REINTERPRET_CAST_EXPR: case CAST_EXPR: case IMPLICIT_CONV_EXPR: /* These expressions are value-dependent if the type to which the cast occurs is dependent or the expression being casted is value-dependent. */ { tree type = TREE_TYPE (expression); if (dependent_type_p (type)) return true; /* A functional cast has a list of operands. */ expression = TREE_OPERAND (expression, 0); if (!expression) { /* If there are no operands, it must be an expression such as "int()". This should not happen for aggregate types because it would form non-constant expressions. */ gcc_assert (cxx_dialect >= cxx11 || INTEGRAL_OR_ENUMERATION_TYPE_P (type)); return false; } if (TREE_CODE (expression) == TREE_LIST) return any_value_dependent_elements_p (expression); return value_dependent_expression_p (expression); } case SIZEOF_EXPR: if (SIZEOF_EXPR_TYPE_P (expression)) return dependent_type_p (TREE_TYPE (TREE_OPERAND (expression, 0))); /* FALLTHRU */ case ALIGNOF_EXPR: case TYPEID_EXPR: /* A `sizeof' expression is value-dependent if the operand is type-dependent or is a pack expansion. 
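     For example (added for exposition), inside

       template <class T, class... Ts> void f ();

     sizeof (T) is value-dependent because T is a dependent type, and
     sizeof...(Ts) is value-dependent because the number of elements in
     the pack is not known until instantiation.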
*/ expression = TREE_OPERAND (expression, 0); if (PACK_EXPANSION_P (expression)) return true; else if (TYPE_P (expression)) return dependent_type_p (expression); return instantiation_dependent_uneval_expression_p (expression); case AT_ENCODE_EXPR: /* An 'encode' expression is value-dependent if the operand is type-dependent. */ expression = TREE_OPERAND (expression, 0); return dependent_type_p (expression); case NOEXCEPT_EXPR: expression = TREE_OPERAND (expression, 0); return instantiation_dependent_uneval_expression_p (expression); case SCOPE_REF: /* All instantiation-dependent expressions should also be considered value-dependent. */ return instantiation_dependent_scope_ref_p (expression); case COMPONENT_REF: return (value_dependent_expression_p (TREE_OPERAND (expression, 0)) || value_dependent_expression_p (TREE_OPERAND (expression, 1))); case NONTYPE_ARGUMENT_PACK: /* A NONTYPE_ARGUMENT_PACK is value-dependent if any packed argument is value-dependent. */ { tree values = ARGUMENT_PACK_ARGS (expression); int i, len = TREE_VEC_LENGTH (values); for (i = 0; i < len; ++i) if (value_dependent_expression_p (TREE_VEC_ELT (values, i))) return true; return false; } case TRAIT_EXPR: { tree type2 = TRAIT_EXPR_TYPE2 (expression); if (dependent_type_p (TRAIT_EXPR_TYPE1 (expression))) return true; if (!type2) return false; if (TREE_CODE (type2) != TREE_LIST) return dependent_type_p (type2); for (; type2; type2 = TREE_CHAIN (type2)) if (dependent_type_p (TREE_VALUE (type2))) return true; return false; } case MODOP_EXPR: return ((value_dependent_expression_p (TREE_OPERAND (expression, 0))) || (value_dependent_expression_p (TREE_OPERAND (expression, 2)))); case ARRAY_REF: return ((value_dependent_expression_p (TREE_OPERAND (expression, 0))) || (value_dependent_expression_p (TREE_OPERAND (expression, 1)))); case ADDR_EXPR: { tree op = TREE_OPERAND (expression, 0); return (value_dependent_expression_p (op) || has_value_dependent_address (op)); } case REQUIRES_EXPR: /* Treat all requires-expressions as value-dependent so we don't try to fold them. */ return true; case TYPE_REQ: return dependent_type_p (TREE_OPERAND (expression, 0)); case CALL_EXPR: { if (value_dependent_expression_p (CALL_EXPR_FN (expression))) return true; tree fn = get_callee_fndecl (expression); int i, nargs; nargs = call_expr_nargs (expression); for (i = 0; i < nargs; ++i) { tree op = CALL_EXPR_ARG (expression, i); /* In a call to a constexpr member function, look through the implicit ADDR_EXPR on the object argument so that it doesn't cause the call to be considered value-dependent. We also look through it in potential_constant_expression. */ if (i == 0 && fn && DECL_DECLARED_CONSTEXPR_P (fn) && DECL_NONSTATIC_MEMBER_FUNCTION_P (fn) && TREE_CODE (op) == ADDR_EXPR) op = TREE_OPERAND (op, 0); if (value_dependent_expression_p (op)) return true; } return false; } case TEMPLATE_ID_EXPR: return concept_definition_p (TREE_OPERAND (expression, 0)); case CONSTRUCTOR: { unsigned ix; tree val; if (dependent_type_p (TREE_TYPE (expression))) return true; FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (expression), ix, val) if (value_dependent_expression_p (val)) return true; return false; } case STMT_EXPR: /* Treat a GNU statement expression as dependent to avoid crashing under instantiate_non_dependent_expr; it can't be constant. */ return true; default: /* A constant expression is value-dependent if any subexpression is value-dependent. 
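     For example (added for exposition), in

       template <int N> struct A { static const int v = N + 1; };

     the initializer N + 1 is value-dependent because its operand N, a
     non-type template parameter, is value-dependent.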
*/ switch (TREE_CODE_CLASS (TREE_CODE (expression))) { case tcc_reference: case tcc_unary: case tcc_comparison: case tcc_binary: case tcc_expression: case tcc_vl_exp: { int i, len = cp_tree_operand_length (expression); for (i = 0; i < len; i++) { tree t = TREE_OPERAND (expression, i); /* In some cases, some of the operands may be missing. (For example, in the case of PREDECREMENT_EXPR, the amount to increment by may be missing.) That doesn't make the expression dependent. */ if (t && value_dependent_expression_p (t)) return true; } } break; default: break; } break; } /* The expression is not value-dependent. */ return false; } /* Returns TRUE if the EXPRESSION is type-dependent, in the sense of [temp.dep.expr]. Note that an expression with no type is considered dependent. Other parts of the compiler arrange for an expression with type-dependent subexpressions to have no type, so this function doesn't have to be fully recursive. */ bool type_dependent_expression_p (tree expression) { if (!processing_template_decl) return false; if (expression == NULL_TREE || expression == error_mark_node) return false; STRIP_ANY_LOCATION_WRAPPER (expression); /* An unresolved name is always dependent. */ if (identifier_p (expression) || TREE_CODE (expression) == USING_DECL || TREE_CODE (expression) == WILDCARD_DECL) return true; /* A lambda-expression in template context is dependent. dependent_type_p is true for a lambda in the scope of a class or function template, but that doesn't cover all template contexts, like a default template argument. */ if (TREE_CODE (expression) == LAMBDA_EXPR) return true; /* A fold expression is type-dependent. */ if (TREE_CODE (expression) == UNARY_LEFT_FOLD_EXPR || TREE_CODE (expression) == UNARY_RIGHT_FOLD_EXPR || TREE_CODE (expression) == BINARY_LEFT_FOLD_EXPR || TREE_CODE (expression) == BINARY_RIGHT_FOLD_EXPR) return true; /* Some expression forms are never type-dependent. */ if (TREE_CODE (expression) == PSEUDO_DTOR_EXPR || TREE_CODE (expression) == SIZEOF_EXPR || TREE_CODE (expression) == ALIGNOF_EXPR || TREE_CODE (expression) == AT_ENCODE_EXPR || TREE_CODE (expression) == NOEXCEPT_EXPR || TREE_CODE (expression) == TRAIT_EXPR || TREE_CODE (expression) == TYPEID_EXPR || TREE_CODE (expression) == DELETE_EXPR || TREE_CODE (expression) == VEC_DELETE_EXPR || TREE_CODE (expression) == THROW_EXPR || TREE_CODE (expression) == REQUIRES_EXPR) return false; /* The types of these expressions depends only on the type to which the cast occurs. */ if (TREE_CODE (expression) == DYNAMIC_CAST_EXPR || TREE_CODE (expression) == STATIC_CAST_EXPR || TREE_CODE (expression) == CONST_CAST_EXPR || TREE_CODE (expression) == REINTERPRET_CAST_EXPR || TREE_CODE (expression) == IMPLICIT_CONV_EXPR || TREE_CODE (expression) == CAST_EXPR) return dependent_type_p (TREE_TYPE (expression)); /* The types of these expressions depends only on the type created by the expression. */ if (TREE_CODE (expression) == NEW_EXPR || TREE_CODE (expression) == VEC_NEW_EXPR) { /* For NEW_EXPR tree nodes created inside a template, either the object type itself or a TREE_LIST may appear as the operand 1. */ tree type = TREE_OPERAND (expression, 1); if (TREE_CODE (type) == TREE_LIST) /* This is an array type. We need to check array dimensions as well. 
*/ return dependent_type_p (TREE_VALUE (TREE_PURPOSE (type))) || value_dependent_expression_p (TREE_OPERAND (TREE_VALUE (type), 1)); else return dependent_type_p (type); } if (TREE_CODE (expression) == SCOPE_REF) { tree scope = TREE_OPERAND (expression, 0); tree name = TREE_OPERAND (expression, 1); /* 14.6.2.2 [temp.dep.expr]: An id-expression is type-dependent if it contains an identifier associated by name lookup with one or more declarations declared with a dependent type, or...a nested-name-specifier or qualified-id that names a member of an unknown specialization. */ return (type_dependent_expression_p (name) || dependent_scope_p (scope)); } if (TREE_CODE (expression) == TEMPLATE_DECL && !DECL_TEMPLATE_TEMPLATE_PARM_P (expression)) return uses_outer_template_parms (expression); if (TREE_CODE (expression) == STMT_EXPR) expression = stmt_expr_value_expr (expression); if (BRACE_ENCLOSED_INITIALIZER_P (expression)) { tree elt; unsigned i; FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (expression), i, elt) { if (type_dependent_expression_p (elt)) return true; } return false; } /* A static data member of the current instantiation with incomplete array type is type-dependent, as the definition and specializations can have different bounds. */ if (VAR_P (expression) && DECL_CLASS_SCOPE_P (expression) && dependent_type_p (DECL_CONTEXT (expression)) && VAR_HAD_UNKNOWN_BOUND (expression)) return true; /* An array of unknown bound depending on a variadic parameter, eg: template<typename... Args> void foo (Args... args) { int arr[] = { args... }; } template<int... vals> void bar () { int arr[] = { vals... }; } If the array has no length and has an initializer, it must be that we couldn't determine its length in cp_complete_array_type because it is dependent. */ if (VAR_P (expression) && TREE_TYPE (expression) != NULL_TREE && TREE_CODE (TREE_TYPE (expression)) == ARRAY_TYPE && !TYPE_DOMAIN (TREE_TYPE (expression)) && DECL_INITIAL (expression)) return true; /* A function or variable template-id is type-dependent if it has any dependent template arguments. */ if (VAR_OR_FUNCTION_DECL_P (expression) && DECL_LANG_SPECIFIC (expression) && DECL_TEMPLATE_INFO (expression)) { /* Consider the innermost template arguments, since those are the ones that come from the template-id; the template arguments for the enclosing class do not make it type-dependent unless they are used in the type of the decl. */ if (instantiates_primary_template_p (expression) && (any_dependent_template_arguments_p (INNERMOST_TEMPLATE_ARGS (DECL_TI_ARGS (expression))))) return true; } /* Otherwise, if the function decl isn't from a dependent scope, it can't be type-dependent. Checking this is important for functions with auto return type, which looks like a dependent type. */ if (TREE_CODE (expression) == FUNCTION_DECL && !(DECL_CLASS_SCOPE_P (expression) && dependent_type_p (DECL_CONTEXT (expression))) && !(DECL_LANG_SPECIFIC (expression) && DECL_FRIEND_P (expression) && (!DECL_FRIEND_CONTEXT (expression) || dependent_type_p (DECL_FRIEND_CONTEXT (expression)))) && !DECL_LOCAL_FUNCTION_P (expression)) { gcc_assert (!dependent_type_p (TREE_TYPE (expression)) || undeduced_auto_decl (expression)); return false; } /* Always dependent, on the number of arguments if nothing else. 
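     For example (added for exposition), in

       template <class... Ts> void f (Ts... args) { g (args...); }

     the expansion args... is treated as type-dependent: even if each
     element's type were known, the number of arguments it produces is
     not known until instantiation.  (g is an invented name here.)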
*/ if (TREE_CODE (expression) == EXPR_PACK_EXPANSION) return true; if (TREE_TYPE (expression) == unknown_type_node) { if (TREE_CODE (expression) == ADDR_EXPR) return type_dependent_expression_p (TREE_OPERAND (expression, 0)); if (TREE_CODE (expression) == COMPONENT_REF || TREE_CODE (expression) == OFFSET_REF) { if (type_dependent_expression_p (TREE_OPERAND (expression, 0))) return true; expression = TREE_OPERAND (expression, 1); if (identifier_p (expression)) return false; } /* SCOPE_REF with non-null TREE_TYPE is always non-dependent. */ if (TREE_CODE (expression) == SCOPE_REF) return false; /* CO_AWAIT/YIELD_EXPR with unknown type is always dependent. */ if (TREE_CODE (expression) == CO_AWAIT_EXPR || TREE_CODE (expression) == CO_YIELD_EXPR) return true; if (BASELINK_P (expression)) { if (BASELINK_OPTYPE (expression) && dependent_type_p (BASELINK_OPTYPE (expression))) return true; expression = BASELINK_FUNCTIONS (expression); } if (TREE_CODE (expression) == TEMPLATE_ID_EXPR) { if (any_dependent_template_arguments_p (TREE_OPERAND (expression, 1))) return true; expression = TREE_OPERAND (expression, 0); if (identifier_p (expression)) return true; } gcc_assert (OVL_P (expression)); for (lkp_iterator iter (expression); iter; ++iter) if (type_dependent_expression_p (*iter)) return true; return false; } /* The type of a non-type template parm declared with a placeholder type depends on the corresponding template argument, even though placeholders are not normally considered dependent. */ if (TREE_CODE (expression) == TEMPLATE_PARM_INDEX && is_auto (TREE_TYPE (expression))) return true; gcc_assert (TREE_CODE (expression) != TYPE_DECL); /* Dependent type attributes might not have made it from the decl to the type yet. */ if (DECL_P (expression) && any_dependent_type_attributes_p (DECL_ATTRIBUTES (expression))) return true; return (dependent_type_p (TREE_TYPE (expression))); } /* [temp.dep.expr]/5: A class member access expression (5.2.5) is type-dependent if the expression refers to a member of the current instantiation and the type of the referenced member is dependent, or the class member access expression refers to a member of an unknown specialization. This function returns true if the OBJECT in such a class member access expression is of an unknown specialization. */ bool type_dependent_object_expression_p (tree object) { /* An IDENTIFIER_NODE can sometimes have a TREE_TYPE, but it's still dependent. */ if (TREE_CODE (object) == IDENTIFIER_NODE) return true; tree scope = TREE_TYPE (object); return (!scope || dependent_scope_p (scope)); } /* walk_tree callback function for instantiation_dependent_expression_p, below. Returns non-zero if a dependent subexpression is found. */ static tree instantiation_dependent_r (tree *tp, int *walk_subtrees, void * /*data*/) { if (TYPE_P (*tp)) { /* We don't have to worry about decltype currently because decltype of an instantiation-dependent expr is a dependent type. This might change depending on the resolution of DR 1172. */ *walk_subtrees = false; return NULL_TREE; } enum tree_code code = TREE_CODE (*tp); switch (code) { /* Don't treat an argument list as dependent just because it has no TREE_TYPE. */ case TREE_LIST: case TREE_VEC: case NONTYPE_ARGUMENT_PACK: return NULL_TREE; case TEMPLATE_PARM_INDEX: if (dependent_type_p (TREE_TYPE (*tp))) return *tp; if (TEMPLATE_PARM_PARAMETER_PACK (*tp)) return *tp; /* We'll check value-dependence separately. */ return NULL_TREE; /* Handle expressions with type operands. 
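     For example (added for exposition): sizeof (T) has the dependent
     type T as its operand, so the walker flags it here; that is what
     makes an expression such as sizeof (sizeof (T))
     instantiation-dependent even though, taken as a whole, it is neither
     type-dependent nor value-dependent -- its subexpression sizeof (T)
     is value-dependent.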
*/ case SIZEOF_EXPR: case ALIGNOF_EXPR: case TYPEID_EXPR: case AT_ENCODE_EXPR: { tree op = TREE_OPERAND (*tp, 0); if (code == SIZEOF_EXPR && SIZEOF_EXPR_TYPE_P (*tp)) op = TREE_TYPE (op); if (TYPE_P (op)) { if (dependent_type_p (op)) return *tp; else { *walk_subtrees = false; return NULL_TREE; } } break; } case COMPONENT_REF: if (identifier_p (TREE_OPERAND (*tp, 1))) /* In a template, finish_class_member_access_expr creates a COMPONENT_REF with an IDENTIFIER_NODE for op1 even if it isn't type-dependent, so that we can check access control at instantiation time (PR 42277). See also Core issue 1273. */ return *tp; break; case SCOPE_REF: if (instantiation_dependent_scope_ref_p (*tp)) return *tp; else break; /* Treat statement-expressions as dependent. */ case BIND_EXPR: return *tp; /* Treat requires-expressions as dependent. */ case REQUIRES_EXPR: return *tp; case CALL_EXPR: /* Treat concept checks as dependent. */ if (concept_check_p (*tp)) return *tp; break; case TEMPLATE_ID_EXPR: /* Treat concept checks as dependent. */ if (concept_check_p (*tp)) return *tp; break; case CONSTRUCTOR: if (CONSTRUCTOR_IS_DEPENDENT (*tp)) return *tp; break; default: break; } if (type_dependent_expression_p (*tp)) return *tp; else return NULL_TREE; } /* Returns TRUE if the EXPRESSION is instantiation-dependent, in the sense defined by the ABI: "An expression is instantiation-dependent if it is type-dependent or value-dependent, or it has a subexpression that is type-dependent or value-dependent." Except don't actually check value-dependence for unevaluated expressions, because in sizeof(i) we don't care about the value of i. Checking type-dependence will in turn check value-dependence of array bounds/template arguments as needed. */ bool instantiation_dependent_uneval_expression_p (tree expression) { tree result; if (!processing_template_decl) return false; if (expression == error_mark_node) return false; result = cp_walk_tree_without_duplicates (&expression, instantiation_dependent_r, NULL); return result != NULL_TREE; } /* As above, but also check value-dependence of the expression as a whole. */ bool instantiation_dependent_expression_p (tree expression) { return (instantiation_dependent_uneval_expression_p (expression) || value_dependent_expression_p (expression)); } /* Like type_dependent_expression_p, but it also works while not processing a template definition, i.e. during substitution or mangling. */ bool type_dependent_expression_p_push (tree expr) { bool b; ++processing_template_decl; b = type_dependent_expression_p (expr); --processing_template_decl; return b; } /* Returns TRUE if ARGS contains a type-dependent expression. */ bool any_type_dependent_arguments_p (const vec<tree, va_gc> *args) { unsigned int i; tree arg; FOR_EACH_VEC_SAFE_ELT (args, i, arg) { if (type_dependent_expression_p (arg)) return true; } return false; } /* Returns TRUE if LIST (a TREE_LIST whose TREE_VALUEs are expressions) contains any type-dependent expressions. */ bool any_type_dependent_elements_p (const_tree list) { for (; list; list = TREE_CHAIN (list)) if (type_dependent_expression_p (TREE_VALUE (list))) return true; return false; } /* Returns TRUE if LIST (a TREE_LIST whose TREE_VALUEs are expressions) contains any value-dependent expressions. */ bool any_value_dependent_elements_p (const_tree list) { for (; list; list = TREE_CHAIN (list)) if (value_dependent_expression_p (TREE_VALUE (list))) return true; return false; } /* Returns TRUE if the ARG (a template argument) is dependent. 
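   For example (added for exposition, S being an invented class
   template): in a use such as S<T*, 3, sizeof (T)>, the first argument
   is a dependent type, the second is not dependent at all, and the third
   is value-dependent, so the argument list as a whole is dependent.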
*/ bool dependent_template_arg_p (tree arg) { if (!processing_template_decl) return false; /* Assume a template argument that was wrongly written by the user is dependent. This is consistent with what any_dependent_template_arguments_p [that calls this function] does. */ if (!arg || arg == error_mark_node) return true; if (TREE_CODE (arg) == ARGUMENT_PACK_SELECT) arg = argument_pack_select_arg (arg); if (TREE_CODE (arg) == TEMPLATE_TEMPLATE_PARM) return true; if (TREE_CODE (arg) == TEMPLATE_DECL) { if (DECL_TEMPLATE_PARM_P (arg)) return true; /* A member template of a dependent class is not necessarily type-dependent, but it is a dependent template argument because it will be a member of an unknown specialization to that template. */ tree scope = CP_DECL_CONTEXT (arg); return TYPE_P (scope) && dependent_type_p (scope); } else if (ARGUMENT_PACK_P (arg)) { tree args = ARGUMENT_PACK_ARGS (arg); int i, len = TREE_VEC_LENGTH (args); for (i = 0; i < len; ++i) { if (dependent_template_arg_p (TREE_VEC_ELT (args, i))) return true; } return false; } else if (TYPE_P (arg)) return dependent_type_p (arg); else return value_dependent_expression_p (arg); } /* Returns true if ARGS (a collection of template arguments) contains any types that require structural equality testing. */ bool any_template_arguments_need_structural_equality_p (tree args) { int i; int j; if (!args) return false; if (args == error_mark_node) return true; for (i = 0; i < TMPL_ARGS_DEPTH (args); ++i) { tree level = TMPL_ARGS_LEVEL (args, i + 1); for (j = 0; j < TREE_VEC_LENGTH (level); ++j) { tree arg = TREE_VEC_ELT (level, j); tree packed_args = NULL_TREE; int k, len = 1; if (ARGUMENT_PACK_P (arg)) { /* Look inside the argument pack. */ packed_args = ARGUMENT_PACK_ARGS (arg); len = TREE_VEC_LENGTH (packed_args); } for (k = 0; k < len; ++k) { if (packed_args) arg = TREE_VEC_ELT (packed_args, k); if (error_operand_p (arg)) return true; else if (TREE_CODE (arg) == TEMPLATE_DECL) continue; else if (TYPE_P (arg) && TYPE_STRUCTURAL_EQUALITY_P (arg)) return true; else if (!TYPE_P (arg) && TREE_TYPE (arg) && TYPE_STRUCTURAL_EQUALITY_P (TREE_TYPE (arg))) return true; } } } return false; } /* Returns true if ARGS (a collection of template arguments) contains any dependent arguments. */ bool any_dependent_template_arguments_p (const_tree args) { int i; int j; if (!args) return false; if (args == error_mark_node) return true; for (i = 0; i < TMPL_ARGS_DEPTH (args); ++i) { const_tree level = TMPL_ARGS_LEVEL (args, i + 1); for (j = 0; j < TREE_VEC_LENGTH (level); ++j) if (dependent_template_arg_p (TREE_VEC_ELT (level, j))) return true; } return false; } /* Returns true if ARGS contains any errors. */ bool any_erroneous_template_args_p (const_tree args) { int i; int j; if (args == error_mark_node) return true; if (args && TREE_CODE (args) != TREE_VEC) { if (tree ti = get_template_info (args)) args = TI_ARGS (ti); else args = NULL_TREE; } if (!args) return false; for (i = 0; i < TMPL_ARGS_DEPTH (args); ++i) { const_tree level = TMPL_ARGS_LEVEL (args, i + 1); for (j = 0; j < TREE_VEC_LENGTH (level); ++j) if (error_operand_p (TREE_VEC_ELT (level, j))) return true; } return false; } /* Returns TRUE if the template TMPL is type-dependent. */ bool dependent_template_p (tree tmpl) { if (TREE_CODE (tmpl) == OVERLOAD) { for (lkp_iterator iter (tmpl); iter; ++iter) if (dependent_template_p (*iter)) return true; return false; } /* Template template parameters are dependent. 
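   For example (added for exposition), in

     template <template <class> class TT> struct X { TT<int> m; };

   the template TT named inside X is dependent, which in turn makes
   TT<int> a dependent type.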
*/ if (DECL_TEMPLATE_TEMPLATE_PARM_P (tmpl) || TREE_CODE (tmpl) == TEMPLATE_TEMPLATE_PARM) return true; /* So are names that have not been looked up. */ if (TREE_CODE (tmpl) == SCOPE_REF || identifier_p (tmpl)) return true; return false; } /* Returns TRUE if the specialization TMPL<ARGS> is dependent. */ bool dependent_template_id_p (tree tmpl, tree args) { return (dependent_template_p (tmpl) || any_dependent_template_arguments_p (args)); } /* Returns TRUE if OMP_FOR with DECLV, INITV, CONDV and INCRV vectors are dependent. */ bool dependent_omp_for_p (tree declv, tree initv, tree condv, tree incrv) { int i; if (!processing_template_decl) return false; for (i = 0; i < TREE_VEC_LENGTH (declv); i++) { tree decl = TREE_VEC_ELT (declv, i); tree init = TREE_VEC_ELT (initv, i); tree cond = TREE_VEC_ELT (condv, i); tree incr = TREE_VEC_ELT (incrv, i); if (type_dependent_expression_p (decl) || TREE_CODE (decl) == SCOPE_REF) return true; if (init && type_dependent_expression_p (init)) return true; if (cond == global_namespace) return true; if (type_dependent_expression_p (cond)) return true; if (COMPARISON_CLASS_P (cond) && (type_dependent_expression_p (TREE_OPERAND (cond, 0)) || type_dependent_expression_p (TREE_OPERAND (cond, 1)))) return true; if (TREE_CODE (incr) == MODOP_EXPR) { if (type_dependent_expression_p (TREE_OPERAND (incr, 0)) || type_dependent_expression_p (TREE_OPERAND (incr, 2))) return true; } else if (type_dependent_expression_p (incr)) return true; else if (TREE_CODE (incr) == MODIFY_EXPR) { if (type_dependent_expression_p (TREE_OPERAND (incr, 0))) return true; else if (BINARY_CLASS_P (TREE_OPERAND (incr, 1))) { tree t = TREE_OPERAND (incr, 1); if (type_dependent_expression_p (TREE_OPERAND (t, 0)) || type_dependent_expression_p (TREE_OPERAND (t, 1))) return true; /* If this loop has a class iterator with != comparison with increment other than i++/++i/i--/--i, make sure the increment is constant. */ if (CLASS_TYPE_P (TREE_TYPE (decl)) && TREE_CODE (cond) == NE_EXPR) { if (TREE_OPERAND (t, 0) == decl) t = TREE_OPERAND (t, 1); else t = TREE_OPERAND (t, 0); if (TREE_CODE (t) != INTEGER_CST) return true; } } } } return false; } /* TYPE is a TYPENAME_TYPE. Returns the ordinary TYPE to which the TYPENAME_TYPE corresponds. Returns the original TYPENAME_TYPE if no such TYPE can be found. Note that this function peers inside uninstantiated templates and therefore should be used only in extremely limited situations. ONLY_CURRENT_P restricts this peering to the currently open classes hierarchy (which is required when comparing types). */ tree resolve_typename_type (tree type, bool only_current_p) { tree scope; tree name; tree decl; int quals; tree pushed_scope; tree result; gcc_assert (TREE_CODE (type) == TYPENAME_TYPE); scope = TYPE_CONTEXT (type); /* We shouldn't have built a TYPENAME_TYPE with a non-dependent scope. */ gcc_checking_assert (uses_template_parms (scope)); /* Usually the non-qualified identifier of a TYPENAME_TYPE is TYPE_IDENTIFIER (type). But when 'type' is a typedef variant of a TYPENAME_TYPE node, then TYPE_NAME (type) is set to the TYPE_DECL representing the typedef. In that case TYPE_IDENTIFIER (type) is not the non-qualified identifier of the TYPENAME_TYPE anymore. So by getting the TYPE_IDENTIFIER of the _main declaration_ of the TYPENAME_TYPE instead, we avoid messing up with a possible typedef variant case. 
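   For example (added for exposition, with an invented member name),
   given

     template <class T> struct A { typedef typename T::value_type VT; };

   the type of VT is a typedef variant of the TYPENAME_TYPE
   T::value_type; its TYPE_NAME is the TYPE_DECL for VT, so asking the
   variant for its identifier would give "VT", whereas the name to look
   up in the resolved scope is "value_type", which the main variant
   supplies.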
*/ name = TYPE_IDENTIFIER (TYPE_MAIN_VARIANT (type)); /* If the SCOPE is itself a TYPENAME_TYPE, then we need to resolve it first before we can figure out what NAME refers to. */ if (TREE_CODE (scope) == TYPENAME_TYPE) { if (TYPENAME_IS_RESOLVING_P (scope)) /* Given a class template A with a dependent base with nested type C, typedef typename A::C::C C will land us here, as trying to resolve the initial A::C leads to the local C typedef, which leads back to A::C::C. So we break the recursion now. */ return type; else scope = resolve_typename_type (scope, only_current_p); } /* If we don't know what SCOPE refers to, then we cannot resolve the TYPENAME_TYPE. */ if (!CLASS_TYPE_P (scope)) return type; /* If this is a typedef, we don't want to look inside (c++/11987). */ if (typedef_variant_p (type)) return type; /* If SCOPE isn't the template itself, it will not have a valid TYPE_FIELDS list. */ if (same_type_p (scope, CLASSTYPE_PRIMARY_TEMPLATE_TYPE (scope))) /* scope is either the template itself or a compatible instantiation like X<T>, so look up the name in the original template. */ scope = CLASSTYPE_PRIMARY_TEMPLATE_TYPE (scope); /* If scope has no fields, it can't be a current instantiation. Check this before currently_open_class to avoid infinite recursion (71515). */ if (!TYPE_FIELDS (scope)) return type; /* If the SCOPE is not the current instantiation, there's no reason to look inside it. */ if (only_current_p && !currently_open_class (scope)) return type; /* Enter the SCOPE so that name lookup will be resolved as if we were in the class definition. In particular, SCOPE will no longer be considered a dependent type. */ pushed_scope = push_scope (scope); /* Look up the declaration. */ decl = lookup_member (scope, name, /*protect=*/0, /*want_type=*/true, tf_warning_or_error); result = NULL_TREE; /* For a TYPENAME_TYPE like "typename X::template Y<T>", we want to find a TEMPLATE_DECL. Otherwise, we want to find a TYPE_DECL. */ tree fullname = TYPENAME_TYPE_FULLNAME (type); if (!decl) /*nop*/; else if (identifier_p (fullname) && TREE_CODE (decl) == TYPE_DECL) { result = TREE_TYPE (decl); if (result == error_mark_node) result = NULL_TREE; } else if (TREE_CODE (fullname) == TEMPLATE_ID_EXPR && DECL_CLASS_TEMPLATE_P (decl)) { /* Obtain the template and the arguments. */ tree tmpl = TREE_OPERAND (fullname, 0); if (TREE_CODE (tmpl) == IDENTIFIER_NODE) { /* We get here with a plain identifier because a previous tentative parse of the nested-name-specifier as part of a ptr-operator saw ::template X<A>. The use of ::template is necessary in a ptr-operator, but wrong in a declarator-id. [temp.names]: In a qualified-id of a declarator-id, the keyword template shall not appear at the top level. */ pedwarn (cp_expr_loc_or_input_loc (fullname), OPT_Wpedantic, "keyword %<template%> not allowed in declarator-id"); tmpl = decl; } tree args = TREE_OPERAND (fullname, 1); /* Instantiate the template. */ result = lookup_template_class (tmpl, args, NULL_TREE, NULL_TREE, /*entering_scope=*/true, tf_error | tf_user); if (result == error_mark_node) result = NULL_TREE; } /* Leave the SCOPE. */ if (pushed_scope) pop_scope (pushed_scope); /* If we failed to resolve it, return the original typename. */ if (!result) return type; /* If lookup found a typename type, resolve that too. */ if (TREE_CODE (result) == TYPENAME_TYPE && !TYPENAME_IS_RESOLVING_P (result)) { /* Ill-formed programs can cause infinite recursion here, so we must catch that. 
*/ TYPENAME_IS_RESOLVING_P (result) = 1; result = resolve_typename_type (result, only_current_p); TYPENAME_IS_RESOLVING_P (result) = 0; } /* Qualify the resulting type. */ quals = cp_type_quals (type); if (quals) result = cp_build_qualified_type (result, cp_type_quals (result) | quals); return result; } /* EXPR is an expression which is not type-dependent. Return a proxy for EXPR that can be used to compute the types of larger expressions containing EXPR. */ tree build_non_dependent_expr (tree expr) { tree orig_expr = expr; tree inner_expr; /* When checking, try to get a constant value for all non-dependent expressions in order to expose bugs in *_dependent_expression_p and constexpr. This can affect code generation, see PR70704, so only do this for -fchecking=2. */ if (flag_checking > 1 && cxx_dialect >= cxx11 /* Don't do this during nsdmi parsing as it can lead to unexpected recursive instantiations. */ && !parsing_nsdmi () /* Don't do this during concept processing either and for the same reason. */ && !processing_constraint_expression_p ()) fold_non_dependent_expr (expr, tf_none); STRIP_ANY_LOCATION_WRAPPER (expr); /* Preserve OVERLOADs; the functions must be available to resolve types. */ inner_expr = expr; if (TREE_CODE (inner_expr) == STMT_EXPR) inner_expr = stmt_expr_value_expr (inner_expr); if (TREE_CODE (inner_expr) == ADDR_EXPR) inner_expr = TREE_OPERAND (inner_expr, 0); if (TREE_CODE (inner_expr) == COMPONENT_REF) inner_expr = TREE_OPERAND (inner_expr, 1); if (is_overloaded_fn (inner_expr) || TREE_CODE (inner_expr) == OFFSET_REF) return orig_expr; /* There is no need to return a proxy for a variable or enumerator. */ if (VAR_P (expr) || TREE_CODE (expr) == CONST_DECL) return orig_expr; /* Preserve string constants; conversions from string constants to "char *" are allowed, even though normally a "const char *" cannot be used to initialize a "char *". */ if (TREE_CODE (expr) == STRING_CST) return orig_expr; /* Preserve void and arithmetic constants, as an optimization -- there is no reason to create a new node. */ if (TREE_CODE (expr) == VOID_CST || TREE_CODE (expr) == INTEGER_CST || TREE_CODE (expr) == REAL_CST) return orig_expr; /* Preserve THROW_EXPRs -- all throw-expressions have type "void". There is at least one place where we want to know that a particular expression is a throw-expression: when checking a ?: expression, there are special rules if the second or third argument is a throw-expression. */ if (TREE_CODE (expr) == THROW_EXPR) return orig_expr; /* Don't wrap an initializer list, we need to be able to look inside. */ if (BRACE_ENCLOSED_INITIALIZER_P (expr)) return orig_expr; /* Don't wrap a dummy object, we need to be able to test for it. */ if (is_dummy_object (expr)) return orig_expr; if (TREE_CODE (expr) == COND_EXPR) return build3 (COND_EXPR, TREE_TYPE (expr), build_non_dependent_expr (TREE_OPERAND (expr, 0)), (TREE_OPERAND (expr, 1) ? build_non_dependent_expr (TREE_OPERAND (expr, 1)) : build_non_dependent_expr (TREE_OPERAND (expr, 0))), build_non_dependent_expr (TREE_OPERAND (expr, 2))); if (TREE_CODE (expr) == COMPOUND_EXPR && !COMPOUND_EXPR_OVERLOADED (expr)) return build2 (COMPOUND_EXPR, TREE_TYPE (expr), TREE_OPERAND (expr, 0), build_non_dependent_expr (TREE_OPERAND (expr, 1))); /* If the type is unknown, it can't really be non-dependent */ gcc_assert (TREE_TYPE (expr) != unknown_type_node); /* Otherwise, build a NON_DEPENDENT_EXPR. 
*/ return build1_loc (EXPR_LOCATION (orig_expr), NON_DEPENDENT_EXPR, TREE_TYPE (expr), expr); } /* ARGS is a vector of expressions as arguments to a function call. Replace the arguments with equivalent non-dependent expressions. This modifies ARGS in place. */ void make_args_non_dependent (vec<tree, va_gc> *args) { unsigned int ix; tree arg; FOR_EACH_VEC_SAFE_ELT (args, ix, arg) { tree newarg = build_non_dependent_expr (arg); if (newarg != arg) (*args)[ix] = newarg; } } /* Returns a type which represents 'auto' or 'decltype(auto)'. We use a TEMPLATE_TYPE_PARM with a level one deeper than the actual template parms. If set_canonical is true, we set TYPE_CANONICAL on it. */ static tree make_auto_1 (tree name, bool set_canonical) { tree au = cxx_make_type (TEMPLATE_TYPE_PARM); TYPE_NAME (au) = build_decl (input_location, TYPE_DECL, name, au); TYPE_STUB_DECL (au) = TYPE_NAME (au); TEMPLATE_TYPE_PARM_INDEX (au) = build_template_parm_index (0, processing_template_decl + 1, processing_template_decl + 1, TYPE_NAME (au), NULL_TREE); if (set_canonical) TYPE_CANONICAL (au) = canonical_type_parameter (au); DECL_ARTIFICIAL (TYPE_NAME (au)) = 1; SET_DECL_TEMPLATE_PARM_P (TYPE_NAME (au)); if (name == decltype_auto_identifier) AUTO_IS_DECLTYPE (au) = true; return au; } tree make_decltype_auto (void) { return make_auto_1 (decltype_auto_identifier, true); } tree make_auto (void) { return make_auto_1 (auto_identifier, true); } /* Return a C++17 deduction placeholder for class template TMPL. */ tree make_template_placeholder (tree tmpl) { tree t = make_auto_1 (auto_identifier, false); CLASS_PLACEHOLDER_TEMPLATE (t) = tmpl; /* Our canonical type depends on the placeholder. */ TYPE_CANONICAL (t) = canonical_type_parameter (t); return t; } /* True iff T is a C++17 class template deduction placeholder. */ bool template_placeholder_p (tree t) { return is_auto (t) && CLASS_PLACEHOLDER_TEMPLATE (t); } /* Make a "constrained auto" type-specifier. This is an auto or decltype(auto) type with constraints that must be associated after deduction. The constraint is formed from the given concept CON and its optional sequence of template arguments ARGS. TYPE must be the result of make_auto_type or make_decltype_auto_type. */ static tree make_constrained_placeholder_type (tree type, tree con, tree args) { /* Build the constraint. */ tree tmpl = DECL_TI_TEMPLATE (con); tree expr = tmpl; if (TREE_CODE (con) == FUNCTION_DECL) expr = ovl_make (tmpl); expr = build_concept_check (expr, type, args, tf_warning_or_error); PLACEHOLDER_TYPE_CONSTRAINTS (type) = expr; /* Our canonical type depends on the constraint. */ TYPE_CANONICAL (type) = canonical_type_parameter (type); /* Attach the constraint to the type declaration. */ return TYPE_NAME (type); } /* Make a "constrained auto" type-specifier. */ tree make_constrained_auto (tree con, tree args) { tree type = make_auto_1 (auto_identifier, false); return make_constrained_placeholder_type (type, con, args); } /* Make a "constrained decltype(auto)" type-specifier. */ tree make_constrained_decltype_auto (tree con, tree args) { tree type = make_auto_1 (decltype_auto_identifier, false); return make_constrained_placeholder_type (type, con, args); } /* Build and return a concept definition. Like other templates, the CONCEPT_DECL node is wrapped by a TEMPLATE_DECL. This returns the the TEMPLATE_DECL. 
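   For illustration only (a hand-written sketch; the concept name is
   invented, and <type_traits> is assumed for std::is_integral_v):

     template <class T>
     concept Integral = std::is_integral_v<T>;

   Here the CONCEPT_DECL for Integral has boolean type, its initializer
   (DECL_INITIAL) is the is_integral_v<T> expression, and the whole
   thing is wrapped in a TEMPLATE_DECL for the parameter list <class T>.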
*/ tree finish_concept_definition (cp_expr id, tree init) { gcc_assert (identifier_p (id)); gcc_assert (processing_template_decl); location_t loc = id.get_location(); /* A concept-definition shall not have associated constraints. */ if (TEMPLATE_PARMS_CONSTRAINTS (current_template_parms)) { error_at (loc, "a concept cannot be constrained"); TEMPLATE_PARMS_CONSTRAINTS (current_template_parms) = NULL_TREE; } /* A concept-definition shall appear in namespace scope. Templates aren't allowed in block scope, so we only need to check for class scope. */ if (TYPE_P (current_scope()) || !DECL_NAMESPACE_SCOPE_P (current_scope ())) { error_at (loc, "concept %qE not in namespace scope", *id); return error_mark_node; } /* Initially build the concept declaration; it's type is bool. */ tree decl = build_lang_decl_loc (loc, CONCEPT_DECL, *id, boolean_type_node); DECL_CONTEXT (decl) = current_scope (); DECL_INITIAL (decl) = init; /* Push the enclosing template. */ return push_template_decl (decl); } /* Given type ARG, return std::initializer_list<ARG>. */ static tree listify (tree arg) { tree std_init_list = get_namespace_binding (std_node, init_list_identifier); if (!std_init_list || !DECL_CLASS_TEMPLATE_P (std_init_list)) { gcc_rich_location richloc (input_location); maybe_add_include_fixit (&richloc, "<initializer_list>", false); error_at (&richloc, "deducing from brace-enclosed initializer list" " requires %<#include <initializer_list>%>"); return error_mark_node; } tree argvec = make_tree_vec (1); TREE_VEC_ELT (argvec, 0) = arg; return lookup_template_class (std_init_list, argvec, NULL_TREE, NULL_TREE, 0, tf_warning_or_error); } /* Replace auto in TYPE with std::initializer_list<auto>. */ static tree listify_autos (tree type, tree auto_node) { tree init_auto = listify (strip_top_quals (auto_node)); tree argvec = make_tree_vec (1); TREE_VEC_ELT (argvec, 0) = init_auto; if (processing_template_decl) argvec = add_to_template_args (current_template_args (), argvec); return tsubst (type, argvec, tf_warning_or_error, NULL_TREE); } /* Hash traits for hashing possibly constrained 'auto' TEMPLATE_TYPE_PARMs for use by do_auto_deduction. */ struct auto_hash : default_hash_traits<tree> { static inline hashval_t hash (tree); static inline bool equal (tree, tree); }; /* Hash the 'auto' T. */ inline hashval_t auto_hash::hash (tree t) { if (tree c = NON_ERROR (PLACEHOLDER_TYPE_CONSTRAINTS (t))) /* Matching constrained-type-specifiers denote the same template parameter, so hash the constraint. */ return hash_placeholder_constraint (c); else /* But unconstrained autos are all separate, so just hash the pointer. */ return iterative_hash_object (t, 0); } /* Compare two 'auto's. */ inline bool auto_hash::equal (tree t1, tree t2) { if (t1 == t2) return true; tree c1 = PLACEHOLDER_TYPE_CONSTRAINTS (t1); tree c2 = PLACEHOLDER_TYPE_CONSTRAINTS (t2); /* Two unconstrained autos are distinct. */ if (!c1 || !c2) return false; return equivalent_placeholder_constraints (c1, c2); } /* for_each_template_parm callback for extract_autos: if t is a (possibly constrained) auto, add it to the vector. */ static int extract_autos_r (tree t, void *data) { hash_table<auto_hash> &hash = *(hash_table<auto_hash>*)data; if (is_auto (t)) { /* All the autos were built with index 0; fix that up now. */ tree *p = hash.find_slot (t, INSERT); unsigned idx; if (*p) /* If this is a repeated constrained-type-specifier, use the index we chose before. 
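	   For illustration only (a hand-written sketch; Sortable is an
	   invented concept name), under the Concepts TS the two
	   occurrences of Sortable in

	     pair<Sortable, Sortable> f ();

	   denote the same invented template parameter, so the second
	   occurrence reuses the index recorded for the first.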
*/ idx = TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (*p)); else { /* Otherwise this is new, so use the current count. */ *p = t; idx = hash.elements () - 1; } TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (t)) = idx; } /* Always keep walking. */ return 0; } /* Return a TREE_VEC of the 'auto's used in type under the Concepts TS, which says they can appear anywhere in the type. */ static tree extract_autos (tree type) { hash_set<tree> visited; hash_table<auto_hash> hash (2); for_each_template_parm (type, extract_autos_r, &hash, &visited, true); tree tree_vec = make_tree_vec (hash.elements()); for (hash_table<auto_hash>::iterator iter = hash.begin(); iter != hash.end(); ++iter) { tree elt = *iter; unsigned i = TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (elt)); TREE_VEC_ELT (tree_vec, i) = build_tree_list (NULL_TREE, TYPE_NAME (elt)); } return tree_vec; } /* The stem for deduction guide names. */ const char *const dguide_base = "__dguide_"; /* Return the name for a deduction guide for class template TMPL. */ tree dguide_name (tree tmpl) { tree type = (TYPE_P (tmpl) ? tmpl : TREE_TYPE (tmpl)); tree tname = TYPE_IDENTIFIER (type); char *buf = (char *) alloca (1 + strlen (dguide_base) + IDENTIFIER_LENGTH (tname)); memcpy (buf, dguide_base, strlen (dguide_base)); memcpy (buf + strlen (dguide_base), IDENTIFIER_POINTER (tname), IDENTIFIER_LENGTH (tname) + 1); tree dname = get_identifier (buf); TREE_TYPE (dname) = type; return dname; } /* True if NAME is the name of a deduction guide. */ bool dguide_name_p (tree name) { return (TREE_CODE (name) == IDENTIFIER_NODE && TREE_TYPE (name) && !strncmp (IDENTIFIER_POINTER (name), dguide_base, strlen (dguide_base))); } /* True if FN is a deduction guide. */ bool deduction_guide_p (const_tree fn) { if (DECL_P (fn)) if (tree name = DECL_NAME (fn)) return dguide_name_p (name); return false; } /* True if FN is the copy deduction guide, i.e. A(A)->A. */ bool copy_guide_p (const_tree fn) { gcc_assert (deduction_guide_p (fn)); if (!DECL_ARTIFICIAL (fn)) return false; tree parms = FUNCTION_FIRST_USER_PARMTYPE (DECL_TI_TEMPLATE (fn)); return (TREE_CHAIN (parms) == void_list_node && same_type_p (TREE_VALUE (parms), TREE_TYPE (DECL_NAME (fn)))); } /* True if FN is a guide generated from a constructor template. */ bool template_guide_p (const_tree fn) { gcc_assert (deduction_guide_p (fn)); if (!DECL_ARTIFICIAL (fn)) return false; tree tmpl = DECL_TI_TEMPLATE (fn); if (tree org = DECL_ABSTRACT_ORIGIN (tmpl)) return PRIMARY_TEMPLATE_P (org); return false; } /* OLDDECL is a _DECL for a template parameter. Return a similar parameter at LEVEL:INDEX, using tsubst_args and complain for substitution into non-type template parameter types. Note that the handling of template template parameters relies on current_template_parms being set appropriately for the new template. */ static tree rewrite_template_parm (tree olddecl, unsigned index, unsigned level, tree tsubst_args, tsubst_flags_t complain) { if (olddecl == error_mark_node) return error_mark_node; tree oldidx = get_template_parm_index (olddecl); tree newtype; if (TREE_CODE (olddecl) == TYPE_DECL || TREE_CODE (olddecl) == TEMPLATE_DECL) { tree oldtype = TREE_TYPE (olddecl); newtype = cxx_make_type (TREE_CODE (oldtype)); TYPE_MAIN_VARIANT (newtype) = newtype; if (TREE_CODE (oldtype) == TEMPLATE_TYPE_PARM) TEMPLATE_TYPE_PARM_FOR_CLASS (newtype) = TEMPLATE_TYPE_PARM_FOR_CLASS (oldtype); } else { newtype = TREE_TYPE (olddecl); if (type_uses_auto (newtype)) { // Substitute once to fix references to other template parameters. 
newtype = tsubst (newtype, tsubst_args, complain|tf_partial, NULL_TREE); // Now substitute again to reduce the level of the auto. newtype = tsubst (newtype, current_template_args (), complain, NULL_TREE); } else newtype = tsubst (newtype, tsubst_args, complain, NULL_TREE); } tree newdecl = build_decl (DECL_SOURCE_LOCATION (olddecl), TREE_CODE (olddecl), DECL_NAME (olddecl), newtype); SET_DECL_TEMPLATE_PARM_P (newdecl); tree newidx; if (TREE_CODE (olddecl) == TYPE_DECL || TREE_CODE (olddecl) == TEMPLATE_DECL) { newidx = TEMPLATE_TYPE_PARM_INDEX (newtype) = build_template_parm_index (index, level, level, newdecl, newtype); TEMPLATE_PARM_PARAMETER_PACK (newidx) = TEMPLATE_PARM_PARAMETER_PACK (oldidx); TYPE_STUB_DECL (newtype) = TYPE_NAME (newtype) = newdecl; if (TYPE_STRUCTURAL_EQUALITY_P (TREE_TYPE (olddecl))) SET_TYPE_STRUCTURAL_EQUALITY (newtype); else TYPE_CANONICAL (newtype) = canonical_type_parameter (newtype); if (TREE_CODE (olddecl) == TEMPLATE_DECL) { DECL_TEMPLATE_RESULT (newdecl) = build_decl (DECL_SOURCE_LOCATION (olddecl), TYPE_DECL, DECL_NAME (olddecl), newtype); DECL_ARTIFICIAL (DECL_TEMPLATE_RESULT (newdecl)) = true; // First create a copy (ttargs) of tsubst_args with an // additional level for the template template parameter's own // template parameters (ttparms). tree ttparms = (INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (olddecl))); const int depth = TMPL_ARGS_DEPTH (tsubst_args); tree ttargs = make_tree_vec (depth + 1); for (int i = 0; i < depth; ++i) TREE_VEC_ELT (ttargs, i) = TREE_VEC_ELT (tsubst_args, i); TREE_VEC_ELT (ttargs, depth) = template_parms_level_to_args (ttparms); // Substitute ttargs into ttparms to fix references to // other template parameters. ttparms = tsubst_template_parms_level (ttparms, ttargs, complain|tf_partial); // Now substitute again with args based on tparms, to reduce // the level of the ttparms. ttargs = current_template_args (); ttparms = tsubst_template_parms_level (ttparms, ttargs, complain); // Finally, tack the adjusted parms onto tparms. ttparms = tree_cons (size_int (depth), ttparms, current_template_parms); DECL_TEMPLATE_PARMS (newdecl) = ttparms; } } else { tree oldconst = TEMPLATE_PARM_DECL (oldidx); tree newconst = build_decl (DECL_SOURCE_LOCATION (oldconst), TREE_CODE (oldconst), DECL_NAME (oldconst), newtype); TREE_CONSTANT (newconst) = TREE_CONSTANT (newdecl) = TREE_READONLY (newconst) = TREE_READONLY (newdecl) = true; SET_DECL_TEMPLATE_PARM_P (newconst); newidx = build_template_parm_index (index, level, level, newconst, newtype); TEMPLATE_PARM_PARAMETER_PACK (newidx) = TEMPLATE_PARM_PARAMETER_PACK (oldidx); DECL_INITIAL (newdecl) = DECL_INITIAL (newconst) = newidx; } return newdecl; } /* As rewrite_template_parm, but for the whole TREE_LIST representing a template parameter. 
*/ static tree rewrite_tparm_list (tree oldelt, unsigned index, unsigned level, tree targs, unsigned targs_index, tsubst_flags_t complain) { tree olddecl = TREE_VALUE (oldelt); tree newdecl = rewrite_template_parm (olddecl, index, level, targs, complain); if (newdecl == error_mark_node) return error_mark_node; tree newdef = tsubst_template_arg (TREE_PURPOSE (oldelt), targs, complain, NULL_TREE); tree list = build_tree_list (newdef, newdecl); TEMPLATE_PARM_CONSTRAINTS (list) = tsubst_constraint_info (TEMPLATE_PARM_CONSTRAINTS (oldelt), targs, complain, NULL_TREE); int depth = TMPL_ARGS_DEPTH (targs); TMPL_ARG (targs, depth, targs_index) = template_parm_to_arg (list); return list; } /* Returns a C++17 class deduction guide template based on the constructor CTOR. As a special case, CTOR can be a RECORD_TYPE for an implicit default guide, REFERENCE_TYPE for an implicit copy/move guide, or TREE_LIST for an aggregate initialization guide. */ static tree build_deduction_guide (tree type, tree ctor, tree outer_args, tsubst_flags_t complain) { tree tparms, targs, fparms, fargs, ci; bool memtmpl = false; bool explicit_p; location_t loc; tree fn_tmpl = NULL_TREE; if (outer_args) { ++processing_template_decl; type = tsubst (type, outer_args, complain, CLASSTYPE_TI_TEMPLATE (type)); --processing_template_decl; } if (!DECL_DECLARES_FUNCTION_P (ctor)) { if (TYPE_P (ctor)) { bool copy_p = TYPE_REF_P (ctor); if (copy_p) fparms = tree_cons (NULL_TREE, type, void_list_node); else fparms = void_list_node; } else if (TREE_CODE (ctor) == TREE_LIST) fparms = ctor; else gcc_unreachable (); tree ctmpl = CLASSTYPE_TI_TEMPLATE (type); tparms = DECL_TEMPLATE_PARMS (ctmpl); targs = CLASSTYPE_TI_ARGS (type); ci = NULL_TREE; fargs = NULL_TREE; loc = DECL_SOURCE_LOCATION (ctmpl); explicit_p = false; } else { ++processing_template_decl; bool ok = true; fn_tmpl = (TREE_CODE (ctor) == TEMPLATE_DECL ? ctor : DECL_TI_TEMPLATE (ctor)); if (outer_args) fn_tmpl = tsubst (fn_tmpl, outer_args, complain, ctor); ctor = DECL_TEMPLATE_RESULT (fn_tmpl); tparms = DECL_TEMPLATE_PARMS (fn_tmpl); /* If type is a member class template, DECL_TI_ARGS (ctor) will have fully specialized args for the enclosing class. Strip those off, as the deduction guide won't have those template parameters. */ targs = get_innermost_template_args (DECL_TI_ARGS (ctor), TMPL_PARMS_DEPTH (tparms)); /* Discard the 'this' parameter. */ fparms = FUNCTION_ARG_CHAIN (ctor); fargs = TREE_CHAIN (DECL_ARGUMENTS (ctor)); ci = get_constraints (ctor); loc = DECL_SOURCE_LOCATION (ctor); explicit_p = DECL_NONCONVERTING_P (ctor); if (PRIMARY_TEMPLATE_P (fn_tmpl)) { memtmpl = true; /* For a member template constructor, we need to flatten the two template parameter lists into one, and then adjust the function signature accordingly. This gets...complicated. */ tree save_parms = current_template_parms; /* For a member template we should have two levels of parms/args, one for the class and one for the constructor. We stripped specialized args for further enclosing classes above. */ const int depth = 2; gcc_assert (TMPL_ARGS_DEPTH (targs) == depth); /* Template args for translating references to the two-level template parameters into references to the one-level template parameters we are creating. */ tree tsubst_args = copy_node (targs); TMPL_ARGS_LEVEL (tsubst_args, depth) = copy_node (TMPL_ARGS_LEVEL (tsubst_args, depth)); /* Template parms for the constructor template. 
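	 For illustration only (a hand-written sketch; A, T and U are
	 invented names), for

	   template <class T> struct A { template <class U> A (U, T); };

	 the guide being built is roughly

	   template <class T, class U> A (U, T) -> A<T>;

	 so the constructor's own parameter U must be renumbered into the
	 single combined parameter list after the class parameter T.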
*/ tree ftparms = TREE_VALUE (tparms); unsigned flen = TREE_VEC_LENGTH (ftparms); /* Template parms for the class template. */ tparms = TREE_CHAIN (tparms); tree ctparms = TREE_VALUE (tparms); unsigned clen = TREE_VEC_LENGTH (ctparms); /* Template parms for the deduction guide start as a copy of the template parms for the class. We set current_template_parms for lookup_template_class_1. */ current_template_parms = tparms = copy_node (tparms); tree new_vec = TREE_VALUE (tparms) = make_tree_vec (flen + clen); for (unsigned i = 0; i < clen; ++i) TREE_VEC_ELT (new_vec, i) = TREE_VEC_ELT (ctparms, i); /* Now we need to rewrite the constructor parms to append them to the class parms. */ for (unsigned i = 0; i < flen; ++i) { unsigned index = i + clen; unsigned level = 1; tree oldelt = TREE_VEC_ELT (ftparms, i); tree newelt = rewrite_tparm_list (oldelt, index, level, tsubst_args, i, complain); if (newelt == error_mark_node) ok = false; TREE_VEC_ELT (new_vec, index) = newelt; } /* Now we have a final set of template parms to substitute into the function signature. */ targs = template_parms_to_args (tparms); fparms = tsubst_arg_types (fparms, tsubst_args, NULL_TREE, complain, ctor); if (fparms == error_mark_node) ok = false; if (ci) ci = tsubst_constraint_info (ci, tsubst_args, complain, ctor); /* Parms are to have DECL_CHAIN tsubsted, which would be skipped if cp_unevaluated_operand. */ cp_evaluated ev; fargs = tsubst (fargs, tsubst_args, complain, ctor); current_template_parms = save_parms; } --processing_template_decl; if (!ok) return error_mark_node; } if (!memtmpl) { /* Copy the parms so we can set DECL_PRIMARY_TEMPLATE. */ tparms = copy_node (tparms); INNERMOST_TEMPLATE_PARMS (tparms) = copy_node (INNERMOST_TEMPLATE_PARMS (tparms)); } tree fntype = build_function_type (type, fparms); tree ded_fn = build_lang_decl_loc (loc, FUNCTION_DECL, dguide_name (type), fntype); DECL_ARGUMENTS (ded_fn) = fargs; DECL_ARTIFICIAL (ded_fn) = true; DECL_NONCONVERTING_P (ded_fn) = explicit_p; tree ded_tmpl = build_template_decl (ded_fn, tparms, /*member*/false); DECL_ARTIFICIAL (ded_tmpl) = true; DECL_TEMPLATE_RESULT (ded_tmpl) = ded_fn; TREE_TYPE (ded_tmpl) = TREE_TYPE (ded_fn); DECL_TEMPLATE_INFO (ded_fn) = build_template_info (ded_tmpl, targs); DECL_PRIMARY_TEMPLATE (ded_tmpl) = ded_tmpl; if (DECL_P (ctor)) DECL_ABSTRACT_ORIGIN (ded_tmpl) = fn_tmpl; if (ci) set_constraints (ded_tmpl, ci); return ded_tmpl; } /* Add to LIST the member types for the reshaped initializer CTOR. */ static tree collect_ctor_idx_types (tree ctor, tree list) { vec<constructor_elt, va_gc> *v = CONSTRUCTOR_ELTS (ctor); tree idx, val; unsigned i; FOR_EACH_CONSTRUCTOR_ELT (v, i, idx, val) { if (BRACE_ENCLOSED_INITIALIZER_P (val) && CONSTRUCTOR_NELTS (val)) if (tree subidx = CONSTRUCTOR_ELT (val, 0)->index) if (TREE_CODE (subidx) == FIELD_DECL) { list = collect_ctor_idx_types (val, list); continue; } tree ftype = finish_decltype_type (idx, true, tf_none); list = tree_cons (NULL_TREE, ftype, list); } return list; } /* Return whether ETYPE is, or is derived from, a specialization of TMPL. 
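   For illustration only (a hand-written sketch; B and D are invented
   names):

     template <class T> struct B { };
     struct D : B<int> { };

   Both B<int> and D are accepted for the template B, since unification
   with UNIFY_ALLOW_DERIVED also succeeds for a class derived from a
   specialization.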
*/ static bool is_spec_or_derived (tree etype, tree tmpl) { if (!etype || !CLASS_TYPE_P (etype)) return false; tree type = TREE_TYPE (tmpl); tree tparms = (INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (tmpl))); tree targs = make_tree_vec (TREE_VEC_LENGTH (tparms)); int err = unify (tparms, targs, type, etype, UNIFY_ALLOW_DERIVED, /*explain*/false); ggc_free (targs); return !err; } /* Return a C++20 aggregate deduction candidate for TYPE initialized from INIT. */ static tree maybe_aggr_guide (tree tmpl, tree init, vec<tree,va_gc> *args) { if (cxx_dialect < cxx2a) return NULL_TREE; if (init == NULL_TREE) return NULL_TREE; tree type = TREE_TYPE (tmpl); if (!CP_AGGREGATE_TYPE_P (type)) return NULL_TREE; /* No aggregate candidate for copy-initialization. */ if (args->length() == 1) { tree val = (*args)[0]; if (is_spec_or_derived (tmpl, TREE_TYPE (val))) return NULL_TREE; } /* If we encounter a problem, we just won't add the candidate. */ tsubst_flags_t complain = tf_none; tree parms = NULL_TREE; if (BRACE_ENCLOSED_INITIALIZER_P (init)) { init = reshape_init (type, init, complain); if (init == error_mark_node) return NULL_TREE; parms = collect_ctor_idx_types (init, parms); } else if (TREE_CODE (init) == TREE_LIST) { int len = list_length (init); for (tree field = TYPE_FIELDS (type); len; --len, field = DECL_CHAIN (field)) { field = next_initializable_field (field); if (!field) return NULL_TREE; tree ftype = finish_decltype_type (field, true, complain); parms = tree_cons (NULL_TREE, ftype, parms); } } else /* Aggregate initialization doesn't apply to an initializer expression. */ return NULL_TREE; if (parms) { tree last = parms; parms = nreverse (parms); TREE_CHAIN (last) = void_list_node; tree guide = build_deduction_guide (type, parms, NULL_TREE, complain); return guide; } return NULL_TREE; } /* UGUIDES are the deduction guides for the underlying template of alias template TMPL; adjust them to be deduction guides for TMPL. */ static tree alias_ctad_tweaks (tree tmpl, tree uguides) { /* [over.match.class.deduct]: When resolving a placeholder for a deduced class type (9.2.8.2) where the template-name names an alias template A, the defining-type-id of A must be of the form typename(opt) nested-name-specifier(opt) template(opt) simple-template-id as specified in 9.2.8.2. The guides of A are the set of functions or function templates formed as follows. For each function or function template f in the guides of the template named by the simple-template-id of the defining-type-id, the template arguments of the return type of f are deduced from the defining-type-id of A according to the process in 13.10.2.5 with the exception that deduction does not fail if not all template arguments are deduced. Let g denote the result of substituting these deductions into f. If substitution succeeds, form a function or function template f' with the following properties and add it to the set of guides of A: * The function type of f' is the function type of g. * If f is a function template, f' is a function template whose template parameter list consists of all the template parameters of A (including their default template arguments) that appear in the above deductions or (recursively) in their default template arguments, followed by the template parameters of f that were not deduced (including their default template arguments), otherwise f' is not a function template. 
* The associated constraints (13.5.2) are the conjunction of the associated constraints of g and a constraint that is satisfied if and only if the arguments of A are deducible (see below) from the return type. * If f is a copy deduction candidate (12.4.1.8), then f' is considered to be so as well. * If f was generated from a deduction-guide (12.4.1.8), then f' is considered to be so as well. * The explicit-specifier of f' is the explicit-specifier of g (if any). */ /* This implementation differs from the above in two significant ways: 1) We include all template parameters of A, not just some. 2) The added constraint is same_type instead of deducible. I believe that while it's probably possible to construct a testcase that behaves differently with this simplification, it should have the same effect for real uses. Including all template parameters means that we deduce all parameters of A when resolving the call, so when we're in the constraint we don't need to deduce them again, we can just check whether the deduction produced the desired result. */ tsubst_flags_t complain = tf_warning_or_error; tree atype = TREE_TYPE (tmpl); tree aguides = NULL_TREE; tree atparms = INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (tmpl)); unsigned natparms = TREE_VEC_LENGTH (atparms); tree utype = DECL_ORIGINAL_TYPE (DECL_TEMPLATE_RESULT (tmpl)); for (ovl_iterator iter (uguides); iter; ++iter) { tree f = *iter; tree in_decl = f; location_t loc = DECL_SOURCE_LOCATION (f); tree ret = TREE_TYPE (TREE_TYPE (f)); tree fprime = f; if (TREE_CODE (f) == TEMPLATE_DECL) { processing_template_decl_sentinel ptds (/*reset*/false); ++processing_template_decl; /* Deduce template arguments for f from the type-id of A. */ tree ftparms = INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (f)); unsigned len = TREE_VEC_LENGTH (ftparms); tree targs = make_tree_vec (len); int err = unify (ftparms, targs, ret, utype, UNIFY_ALLOW_NONE, false); gcc_assert (!err); /* The number of parms for f' is the number of parms for A plus non-deduced parms of f. */ unsigned ndlen = 0; unsigned j; for (unsigned i = 0; i < len; ++i) if (TREE_VEC_ELT (targs, i) == NULL_TREE) ++ndlen; tree gtparms = make_tree_vec (natparms + ndlen); /* First copy over the parms of A. */ for (j = 0; j < natparms; ++j) TREE_VEC_ELT (gtparms, j) = TREE_VEC_ELT (atparms, j); /* Now rewrite the non-deduced parms of f. */ for (unsigned i = 0; ndlen && i < len; ++i) if (TREE_VEC_ELT (targs, i) == NULL_TREE) { --ndlen; unsigned index = j++; unsigned level = 1; tree oldlist = TREE_VEC_ELT (ftparms, i); tree list = rewrite_tparm_list (oldlist, index, level, targs, i, complain); TREE_VEC_ELT (gtparms, index) = list; } gtparms = build_tree_list (size_one_node, gtparms); /* Substitute the deduced arguments plus the rewritten template parameters into f to get g. This covers the type, copyness, guideness, and explicit-specifier. */ tree g = tsubst_decl (DECL_TEMPLATE_RESULT (f), targs, complain); if (g == error_mark_node) return error_mark_node; DECL_USE_TEMPLATE (g) = 0; fprime = build_template_decl (g, gtparms, false); DECL_TEMPLATE_RESULT (fprime) = g; TREE_TYPE (fprime) = TREE_TYPE (g); tree gtargs = template_parms_to_args (gtparms); DECL_TEMPLATE_INFO (g) = build_template_info (fprime, gtargs); DECL_PRIMARY_TEMPLATE (fprime) = fprime; /* Substitute the associated constraints. 
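	 For illustration only (a hand-written sketch; P is an invented
	 alias name), with

	   template <class T> using P = std::pair<int, T>;

	 the guides of std::pair are rewritten so that P p (1, 2.0)
	 deduces P<double>, i.e. std::pair<int, double>.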
*/ tree ci = get_constraints (f); if (ci) ci = tsubst_constraint_info (ci, targs, complain, in_decl); if (ci == error_mark_node) return error_mark_node; /* Add a constraint that the return type matches the instantiation of A with the same template arguments. */ ret = TREE_TYPE (TREE_TYPE (fprime)); if (!same_type_p (atype, ret) /* FIXME this should mean they don't compare as equivalent. */ || dependent_alias_template_spec_p (atype, nt_opaque)) { tree same = finish_trait_expr (loc, CPTK_IS_SAME_AS, atype, ret); ci = append_constraint (ci, same); } if (ci) set_constraints (fprime, ci); } else { /* For a non-template deduction guide, if the arguments of A aren't deducible from the return type, don't add the candidate. */ tree targs = make_tree_vec (natparms); int err = unify (atparms, targs, utype, ret, UNIFY_ALLOW_NONE, false); for (unsigned i = 0; !err && i < natparms; ++i) if (TREE_VEC_ELT (targs, i) == NULL_TREE) err = true; if (err) continue; } aguides = lookup_add (fprime, aguides); } return aguides; } /* Return artificial deduction guides built from the constructors of class template TMPL. */ static tree ctor_deduction_guides_for (tree tmpl, tsubst_flags_t complain) { tree type = TREE_TYPE (tmpl); tree outer_args = NULL_TREE; if (DECL_CLASS_SCOPE_P (tmpl) && CLASSTYPE_TEMPLATE_INSTANTIATION (DECL_CONTEXT (tmpl))) { outer_args = CLASSTYPE_TI_ARGS (DECL_CONTEXT (tmpl)); type = TREE_TYPE (most_general_template (tmpl)); } tree cands = NULL_TREE; for (ovl_iterator iter (CLASSTYPE_CONSTRUCTORS (type)); iter; ++iter) { /* Skip inherited constructors. */ if (iter.using_p ()) continue; tree guide = build_deduction_guide (type, *iter, outer_args, complain); cands = lookup_add (guide, cands); } /* Add implicit default constructor deduction guide. */ if (!TYPE_HAS_USER_CONSTRUCTOR (type)) { tree guide = build_deduction_guide (type, type, outer_args, complain); cands = lookup_add (guide, cands); } /* Add copy guide. */ { tree gtype = build_reference_type (type); tree guide = build_deduction_guide (type, gtype, outer_args, complain); cands = lookup_add (guide, cands); } return cands; } static GTY((deletable)) hash_map<tree, tree_pair_p> *dguide_cache; /* Return the non-aggregate deduction guides for deducible template TMPL. The aggregate candidate is added separately because it depends on the initializer. Set ANY_DGUIDES_P if we find a non-implicit deduction guide. */ static tree deduction_guides_for (tree tmpl, bool &any_dguides_p, tsubst_flags_t complain) { tree guides = NULL_TREE; if (DECL_ALIAS_TEMPLATE_P (tmpl)) { tree under = DECL_ORIGINAL_TYPE (DECL_TEMPLATE_RESULT (tmpl)); tree tinfo = get_template_info (under); guides = deduction_guides_for (TI_TEMPLATE (tinfo), any_dguides_p, complain); } else { guides = lookup_qualified_name (CP_DECL_CONTEXT (tmpl), dguide_name (tmpl), /*type*/false, /*complain*/false, /*hidden*/false); if (guides == error_mark_node) guides = NULL_TREE; else any_dguides_p = true; } /* Cache the deduction guides for a template. We also remember the result of lookup, and rebuild everything if it changes; should be very rare. 
*/ tree_pair_p cache = NULL; if (tree_pair_p &r = hash_map_safe_get_or_insert<hm_ggc> (dguide_cache, tmpl)) { cache = r; if (cache->purpose == guides) return cache->value; } else { r = cache = ggc_cleared_alloc<tree_pair_s> (); cache->purpose = guides; } tree cands = NULL_TREE; if (DECL_ALIAS_TEMPLATE_P (tmpl)) cands = alias_ctad_tweaks (tmpl, guides); else { cands = ctor_deduction_guides_for (tmpl, complain); for (ovl_iterator it (guides); it; ++it) cands = lookup_add (*it, cands); } cache->value = cands; return cands; } /* Return whether TMPL is a (class template argument-) deducible template. */ bool ctad_template_p (tree tmpl) { /* A deducible template is either a class template or is an alias template whose defining-type-id is of the form typename(opt) nested-name-specifier(opt) template(opt) simple-template-id where the nested-name-specifier (if any) is non-dependent and the template-name of the simple-template-id names a deducible template. */ if (DECL_CLASS_TEMPLATE_P (tmpl) || DECL_TEMPLATE_TEMPLATE_PARM_P (tmpl)) return true; if (!DECL_ALIAS_TEMPLATE_P (tmpl)) return false; tree orig = DECL_ORIGINAL_TYPE (DECL_TEMPLATE_RESULT (tmpl)); if (tree tinfo = get_template_info (orig)) return ctad_template_p (TI_TEMPLATE (tinfo)); return false; } /* Deduce template arguments for the class template placeholder PTYPE for template TMPL based on the initializer INIT, and return the resulting type. */ static tree do_class_deduction (tree ptype, tree tmpl, tree init, int flags, tsubst_flags_t complain) { /* We should have handled this in the caller. */ if (DECL_TEMPLATE_TEMPLATE_PARM_P (tmpl)) return ptype; /* Initializing one placeholder from another. */ if (init && TREE_CODE (init) == TEMPLATE_PARM_INDEX && is_auto (TREE_TYPE (init)) && CLASS_PLACEHOLDER_TEMPLATE (TREE_TYPE (init)) == tmpl) return cp_build_qualified_type (TREE_TYPE (init), cp_type_quals (ptype)); /* Look through alias templates that just rename another template. */ tmpl = get_underlying_template (tmpl); if (!ctad_template_p (tmpl)) { if (complain & tf_error) error ("non-deducible template %qT used without template arguments", tmpl); return error_mark_node; } else if (cxx_dialect < cxx2a && DECL_ALIAS_TEMPLATE_P (tmpl)) { /* This doesn't affect conforming C++17 code, so just pedwarn. */ if (complain & tf_warning_or_error) pedwarn (input_location, 0, "alias template deduction only available " "with %<-std=c++2a%> or %<-std=gnu++2a%>"); } tree type = TREE_TYPE (tmpl); bool try_list_ctor = false; releasing_vec rv_args = NULL; vec<tree,va_gc> *&args = *&rv_args; if (init == NULL_TREE) args = make_tree_vector (); else if (BRACE_ENCLOSED_INITIALIZER_P (init)) { try_list_ctor = TYPE_HAS_LIST_CTOR (type); if (try_list_ctor && CONSTRUCTOR_NELTS (init) == 1) { /* As an exception, the first phase in 16.3.1.7 (considering the initializer list as a single argument) is omitted if the initializer list consists of a single expression of type cv U, where U is a specialization of C or a class derived from a specialization of C. */ tree elt = CONSTRUCTOR_ELT (init, 0)->value; if (is_spec_or_derived (TREE_TYPE (elt), tmpl)) try_list_ctor = false; } if (try_list_ctor || is_std_init_list (type)) args = make_tree_vector_single (init); else args = make_tree_vector_from_ctor (init); } else if (TREE_CODE (init) == TREE_LIST) args = make_tree_vector_from_list (init); else args = make_tree_vector_single (init); /* Do this now to avoid problems with erroneous args later on. 
*/ args = resolve_args (args, complain); if (args == NULL) return error_mark_node; bool any_dguides_p = false; tree cands = deduction_guides_for (tmpl, any_dguides_p, complain); if (cands == error_mark_node) return error_mark_node; /* Prune explicit deduction guides in copy-initialization context. */ bool elided = false; if (flags & LOOKUP_ONLYCONVERTING) { for (lkp_iterator iter (cands); !elided && iter; ++iter) if (DECL_NONCONVERTING_P (STRIP_TEMPLATE (*iter))) elided = true; if (elided) { /* Found a nonconverting guide, prune the candidates. */ tree pruned = NULL_TREE; for (lkp_iterator iter (cands); iter; ++iter) if (!DECL_NONCONVERTING_P (STRIP_TEMPLATE (*iter))) pruned = lookup_add (*iter, pruned); cands = pruned; } } if (!any_dguides_p) if (tree guide = maybe_aggr_guide (tmpl, init, args)) cands = lookup_add (guide, cands); tree call = error_mark_node; /* If this is list-initialization and the class has a list constructor, first try deducing from the list as a single argument, as [over.match.list]. */ tree list_cands = NULL_TREE; if (try_list_ctor && cands) for (lkp_iterator iter (cands); iter; ++iter) { tree dg = *iter; if (is_list_ctor (dg)) list_cands = lookup_add (dg, list_cands); } if (list_cands) { ++cp_unevaluated_operand; call = build_new_function_call (list_cands, &args, tf_decltype); --cp_unevaluated_operand; if (call == error_mark_node) { /* That didn't work, now try treating the list as a sequence of arguments. */ release_tree_vector (args); args = make_tree_vector_from_ctor (init); } } if (elided && !cands) { error ("cannot deduce template arguments for copy-initialization" " of %qT, as it has no non-explicit deduction guides or " "user-declared constructors", type); return error_mark_node; } else if (!cands && call == error_mark_node) { error ("cannot deduce template arguments of %qT, as it has no viable " "deduction guides", type); return error_mark_node; } if (call == error_mark_node) { ++cp_unevaluated_operand; call = build_new_function_call (cands, &args, tf_decltype); --cp_unevaluated_operand; } if (call == error_mark_node && (complain & tf_warning_or_error)) { error ("class template argument deduction failed:"); ++cp_unevaluated_operand; call = build_new_function_call (cands, &args, complain | tf_decltype); --cp_unevaluated_operand; if (elided) inform (input_location, "explicit deduction guides not considered " "for copy-initialization"); } return cp_build_qualified_type (TREE_TYPE (call), cp_type_quals (ptype)); } /* Replace occurrences of 'auto' in TYPE with the appropriate type deduced from INIT. AUTO_NODE is the TEMPLATE_TYPE_PARM used for 'auto' in TYPE. The CONTEXT determines the context in which auto deduction is performed and is used to control error diagnostics. FLAGS are the LOOKUP_* flags. OUTER_TARGS are used during template argument deduction (context == adc_unify) to properly substitute the result, and is ignored in other contexts. For partial-concept-ids, extra args may be appended to the list of deduced template arguments prior to determining constraint satisfaction. */ tree do_auto_deduction (tree type, tree init, tree auto_node, tsubst_flags_t complain, auto_deduction_context context, tree outer_targs, int flags) { tree targs; if (init == error_mark_node) return error_mark_node; if (init && type_dependent_expression_p (init) && context != adc_unify) /* Defining a subset of type-dependent expressions that we can deduce from ahead of time isn't worth the trouble. */ return type; /* Similarly, we can't deduce from another undeduced decl. 
*/ if (init && undeduced_auto_decl (init)) return type; /* We may be doing a partial substitution, but we still want to replace auto_node. */ complain &= ~tf_partial; if (tree tmpl = CLASS_PLACEHOLDER_TEMPLATE (auto_node)) /* C++17 class template argument deduction. */ return do_class_deduction (type, tmpl, init, flags, complain); if (init == NULL_TREE || TREE_TYPE (init) == NULL_TREE) /* Nothing we can do with this, even in deduction context. */ return type; /* [dcl.spec.auto]: Obtain P from T by replacing the occurrences of auto with either a new invented type template parameter U or, if the initializer is a braced-init-list (8.5.4), with std::initializer_list<U>. */ if (BRACE_ENCLOSED_INITIALIZER_P (init)) { if (!DIRECT_LIST_INIT_P (init)) type = listify_autos (type, auto_node); else if (CONSTRUCTOR_NELTS (init) == 1) init = CONSTRUCTOR_ELT (init, 0)->value; else { if (complain & tf_warning_or_error) { if (permerror (input_location, "direct-list-initialization of " "%<auto%> requires exactly one element")) inform (input_location, "for deduction to %<std::initializer_list%>, use copy-" "list-initialization (i.e. add %<=%> before the %<{%>)"); } type = listify_autos (type, auto_node); } } if (type == error_mark_node) return error_mark_node; init = resolve_nondeduced_context (init, complain); if (context == adc_decomp_type && auto_node == type && init != error_mark_node && TREE_CODE (TREE_TYPE (init)) == ARRAY_TYPE) /* [dcl.decomp]/1 - if decomposition declaration has no ref-qualifiers and initializer has array type, deduce cv-qualified array type. */ return cp_build_qualified_type_real (TREE_TYPE (init), TYPE_QUALS (type), complain); else if (AUTO_IS_DECLTYPE (auto_node)) { tree stripped_init = tree_strip_any_location_wrapper (init); bool id = (DECL_P (stripped_init) || ((TREE_CODE (init) == COMPONENT_REF || TREE_CODE (init) == SCOPE_REF) && !REF_PARENTHESIZED_P (init))); targs = make_tree_vec (1); TREE_VEC_ELT (targs, 0) = finish_decltype_type (init, id, tf_warning_or_error); if (type != auto_node) { if (complain & tf_error) error ("%qT as type rather than plain %<decltype(auto)%>", type); return error_mark_node; } } else { if (error_operand_p (init)) return error_mark_node; tree parms = build_tree_list (NULL_TREE, type); tree tparms; if (flag_concepts) tparms = extract_autos (type); else { tparms = make_tree_vec (1); TREE_VEC_ELT (tparms, 0) = build_tree_list (NULL_TREE, TYPE_NAME (auto_node)); } targs = make_tree_vec (TREE_VEC_LENGTH (tparms)); int val = type_unification_real (tparms, targs, parms, &init, 1, 0, DEDUCE_CALL, NULL, /*explain_p=*/false); if (val > 0) { if (processing_template_decl) /* Try again at instantiation time. */ return type; if (type && type != error_mark_node && (complain & tf_error)) /* If type is error_mark_node a diagnostic must have been emitted by now. Also, having a mention to '<type error>' in the diagnostic is not really useful to the user. */ { if (cfun && FNDECL_USED_AUTO (current_function_decl) && (auto_node == DECL_SAVED_AUTO_RETURN_TYPE (current_function_decl)) && LAMBDA_FUNCTION_P (current_function_decl)) error ("unable to deduce lambda return type from %qE", init); else error ("unable to deduce %qT from %qE", type, init); type_unification_real (tparms, targs, parms, &init, 1, 0, DEDUCE_CALL, NULL, /*explain_p=*/true); } return error_mark_node; } } /* Check any placeholder constraints against the deduced type. 
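     For illustration only (a hand-written sketch, assuming <concepts>
     is available):

       std::integral auto n = 42;    // OK: int satisfies std::integral
       std::integral auto d = 1.5;   // rejected: double does not

     The deduced type is substituted into the placeholder's constraint
     and the declaration is rejected if the check fails.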
*/ if (flag_concepts && !processing_template_decl) if (tree check = NON_ERROR (PLACEHOLDER_TYPE_CONSTRAINTS (auto_node))) { /* Use the deduced type to check the associated constraints. If we have a partial-concept-id, rebuild the argument list so that we check using the extra arguments. */ check = unpack_concept_check (check); gcc_assert (TREE_CODE (check) == TEMPLATE_ID_EXPR); tree cdecl = TREE_OPERAND (check, 0); if (OVL_P (cdecl)) cdecl = OVL_FIRST (cdecl); tree cargs = TREE_OPERAND (check, 1); if (TREE_VEC_LENGTH (cargs) > 1) { cargs = copy_node (cargs); TREE_VEC_ELT (cargs, 0) = TREE_VEC_ELT (targs, 0); } else cargs = targs; /* Rebuild the check using the deduced arguments. */ check = build_concept_check (cdecl, cargs, tf_none); if (!constraints_satisfied_p (check)) { if (complain & tf_warning_or_error) { auto_diagnostic_group d; switch (context) { case adc_unspecified: case adc_unify: error("placeholder constraints not satisfied"); break; case adc_variable_type: case adc_decomp_type: error ("deduced initializer does not satisfy " "placeholder constraints"); break; case adc_return_type: error ("deduced return type does not satisfy " "placeholder constraints"); break; case adc_requirement: error ("deduced expression type does not satisfy " "placeholder constraints"); break; } diagnose_constraints (input_location, check, targs); } return error_mark_node; } } if (processing_template_decl && context != adc_unify) outer_targs = current_template_args (); targs = add_to_template_args (outer_targs, targs); return tsubst (type, targs, complain, NULL_TREE); } /* Substitutes LATE_RETURN_TYPE for 'auto' in TYPE and returns the result. */ tree splice_late_return_type (tree type, tree late_return_type) { if (late_return_type) { gcc_assert (is_auto (type) || seen_error ()); return late_return_type; } if (tree auto_node = find_type_usage (type, is_auto)) if (TEMPLATE_TYPE_LEVEL (auto_node) <= processing_template_decl) { /* In an abbreviated function template we didn't know we were dealing with a function template when we saw the auto return type, so rebuild the return type using an auto with the correct level. */ tree new_auto = make_auto_1 (TYPE_IDENTIFIER (auto_node), false); tree auto_vec = make_tree_vec (1); TREE_VEC_ELT (auto_vec, 0) = new_auto; tree targs = add_outermost_template_args (current_template_args (), auto_vec); /* FIXME: We should also rebuild the constraint to refer to the new auto. */ PLACEHOLDER_TYPE_CONSTRAINTS (new_auto) = PLACEHOLDER_TYPE_CONSTRAINTS (auto_node); TYPE_CANONICAL (new_auto) = canonical_type_parameter (new_auto); return tsubst (type, targs, tf_none, NULL_TREE); } return type; } /* Returns true iff TYPE is a TEMPLATE_TYPE_PARM representing 'auto' or 'decltype(auto)' or a deduced class template. */ bool is_auto (const_tree type) { if (TREE_CODE (type) == TEMPLATE_TYPE_PARM && (TYPE_IDENTIFIER (type) == auto_identifier || TYPE_IDENTIFIER (type) == decltype_auto_identifier)) return true; else return false; } /* for_each_template_parm callback for type_uses_auto. */ int is_auto_r (tree tp, void */*data*/) { return is_auto (tp); } /* Returns the TEMPLATE_TYPE_PARM in TYPE representing `auto' iff TYPE contains a use of `auto'. Returns NULL_TREE otherwise. */ tree type_uses_auto (tree type) { if (type == NULL_TREE) return NULL_TREE; else if (flag_concepts) { /* The Concepts TS allows multiple autos in one type-specifier; just return the first one we find, do_auto_deduction will collect all of them. 
*/ if (uses_template_parms (type)) return for_each_template_parm (type, is_auto_r, /*data*/NULL, /*visited*/NULL, /*nondeduced*/false); else return NULL_TREE; } else return find_type_usage (type, is_auto); } /* Report ill-formed occurrences of auto types in ARGUMENTS. If concepts are enabled, auto is acceptable in template arguments, but only when TEMPL identifies a template class. Return TRUE if any such errors were reported. */ bool check_auto_in_tmpl_args (tree tmpl, tree args) { /* If there were previous errors, nevermind. */ if (!args || TREE_CODE (args) != TREE_VEC) return false; /* If TMPL is an identifier, we're parsing and we can't tell yet whether TMPL is supposed to be a type, a function or a variable. We'll only be able to tell during template substitution, so we expect to be called again then. If concepts are enabled and we know we have a type, we're ok. */ if (flag_concepts && (identifier_p (tmpl) || (DECL_P (tmpl) && (DECL_TYPE_TEMPLATE_P (tmpl) || DECL_TEMPLATE_TEMPLATE_PARM_P (tmpl))))) return false; /* Quickly search for any occurrences of auto; usually there won't be any, and then we'll avoid allocating the vector. */ if (!type_uses_auto (args)) return false; bool errors = false; tree vec = extract_autos (args); for (int i = 0; i < TREE_VEC_LENGTH (vec); i++) { tree xauto = TREE_VALUE (TREE_VEC_ELT (vec, i)); error_at (DECL_SOURCE_LOCATION (xauto), "invalid use of %qT in template argument", xauto); errors = true; } return errors; } /* For a given template T, return the vector of typedefs referenced in T for which access check is needed at T instantiation time. T is either a FUNCTION_DECL or a RECORD_TYPE. Those typedefs were added to T by the function append_type_to_template_for_access_check. */ vec<qualified_typedef_usage_t, va_gc> * get_types_needing_access_check (tree t) { tree ti; vec<qualified_typedef_usage_t, va_gc> *result = NULL; if (!t || t == error_mark_node) return NULL; if (!(ti = get_template_info (t))) return NULL; if (CLASS_TYPE_P (t) || TREE_CODE (t) == FUNCTION_DECL) { if (!TI_TEMPLATE (ti)) return NULL; result = TI_TYPEDEFS_NEEDING_ACCESS_CHECKING (ti); } return result; } /* Append the typedef TYPE_DECL used in template T to a list of typedefs tied to T. That list of typedefs will be access checked at T instantiation time. T is either a FUNCTION_DECL or a RECORD_TYPE. TYPE_DECL is a TYPE_DECL node representing a typedef. SCOPE is the scope through which TYPE_DECL is accessed. LOCATION is the location of the usage point of TYPE_DECL. This function is a subroutine of append_type_to_template_for_access_check. */ static void append_type_to_template_for_access_check_1 (tree t, tree type_decl, tree scope, location_t location) { qualified_typedef_usage_t typedef_usage; tree ti; if (!t || t == error_mark_node) return; gcc_assert ((TREE_CODE (t) == FUNCTION_DECL || CLASS_TYPE_P (t)) && type_decl && TREE_CODE (type_decl) == TYPE_DECL && scope); if (!(ti = get_template_info (t))) return; gcc_assert (TI_TEMPLATE (ti)); typedef_usage.typedef_decl = type_decl; typedef_usage.context = scope; typedef_usage.locus = location; vec_safe_push (TI_TYPEDEFS_NEEDING_ACCESS_CHECKING (ti), typedef_usage); } /* Append TYPE_DECL to the template TEMPL. TEMPL is either a class type, a FUNCTION_DECL or a TEMPLATE_DECL. At TEMPL instanciation time, TYPE_DECL will be checked to see if it can be accessed through SCOPE. LOCATION is the location of the usage point of TYPE_DECL. e.g. 
consider the following code snippet: class C { typedef int myint; }; template<class U> struct S { C::myint mi; // <-- usage point of the typedef C::myint }; S<char> s; At S<char> instantiation time, we need to check the access of C::myint In other words, we need to check the access of the myint typedef through the C scope. For that purpose, this function will add the myint typedef and the scope C through which its being accessed to a list of typedefs tied to the template S. That list will be walked at template instantiation time and access check performed on each typedefs it contains. Note that this particular code snippet should yield an error because myint is private to C. */ void append_type_to_template_for_access_check (tree templ, tree type_decl, tree scope, location_t location) { qualified_typedef_usage_t *iter; unsigned i; gcc_assert (type_decl && (TREE_CODE (type_decl) == TYPE_DECL)); /* Make sure we don't append the type to the template twice. */ FOR_EACH_VEC_SAFE_ELT (get_types_needing_access_check (templ), i, iter) if (iter->typedef_decl == type_decl && scope == iter->context) return; append_type_to_template_for_access_check_1 (templ, type_decl, scope, location); } /* Recursively walk over && expressions searching for EXPR. Return a reference to that expression. */ static tree *find_template_requirement (tree *t, tree key) { if (*t == key) return t; if (TREE_CODE (*t) == TRUTH_ANDIF_EXPR) { if (tree *p = find_template_requirement (&TREE_OPERAND (*t, 0), key)) return p; if (tree *p = find_template_requirement (&TREE_OPERAND (*t, 1), key)) return p; } return 0; } /* Convert the generic type parameters in PARM that match the types given in the range [START_IDX, END_IDX) from the current_template_parms into generic type packs. */ tree convert_generic_types_to_packs (tree parm, int start_idx, int end_idx) { tree current = current_template_parms; int depth = TMPL_PARMS_DEPTH (current); current = INNERMOST_TEMPLATE_PARMS (current); tree replacement = make_tree_vec (TREE_VEC_LENGTH (current)); for (int i = 0; i < start_idx; ++i) TREE_VEC_ELT (replacement, i) = TREE_TYPE (TREE_VALUE (TREE_VEC_ELT (current, i))); for (int i = start_idx; i < end_idx; ++i) { /* Create a distinct parameter pack type from the current parm and add it to the replacement args to tsubst below into the generic function parameter. */ tree node = TREE_VEC_ELT (current, i); tree o = TREE_TYPE (TREE_VALUE (node)); tree t = copy_type (o); TEMPLATE_TYPE_PARM_INDEX (t) = reduce_template_parm_level (TEMPLATE_TYPE_PARM_INDEX (o), t, 0, 0, tf_none); TREE_TYPE (TEMPLATE_TYPE_DECL (t)) = t; TYPE_STUB_DECL (t) = TYPE_NAME (t) = TEMPLATE_TYPE_DECL (t); TYPE_MAIN_VARIANT (t) = t; TEMPLATE_TYPE_PARAMETER_PACK (t) = true; TYPE_CANONICAL (t) = canonical_type_parameter (t); TREE_VEC_ELT (replacement, i) = t; /* Replace the current template parameter with new pack. */ TREE_VALUE (node) = TREE_CHAIN (t); /* Surgically adjust the associated constraint of adjusted parameter and it's corresponding contribution to the current template requirements. */ if (tree constr = TEMPLATE_PARM_CONSTRAINTS (node)) { tree id = unpack_concept_check (constr); TREE_VEC_ELT (TREE_OPERAND (id, 1), 0) = t; tree fold = finish_left_unary_fold_expr (constr, TRUTH_ANDIF_EXPR); TEMPLATE_PARM_CONSTRAINTS (node) = fold; /* If there was a constraint, we also need to replace that in the template requirements, which we've already built. 
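	     For illustration only (a hand-written sketch; C is an
	     invented concept name), for the abbreviated template

	       void f (C auto... xs);

	     each invented parameter was turned into a pack above, the
	     requirement C<T> becomes the unary left fold (... && C<T>)
	     over the new pack, and that fold must also be patched into
	     the already-built template requirements here.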
*/ tree *reqs = &TEMPLATE_PARMS_CONSTRAINTS (current_template_parms); reqs = find_template_requirement (reqs, constr); *reqs = fold; } } for (int i = end_idx, e = TREE_VEC_LENGTH (current); i < e; ++i) TREE_VEC_ELT (replacement, i) = TREE_TYPE (TREE_VALUE (TREE_VEC_ELT (current, i))); /* If there are more levels then build up the replacement with the outer template parms. */ if (depth > 1) replacement = add_to_template_args (template_parms_to_args (TREE_CHAIN (current_template_parms)), replacement); return tsubst (parm, replacement, tf_none, NULL_TREE); } /* __integer_pack(N) in a pack expansion expands to a sequence of numbers from 0..N-1. */ void declare_integer_pack (void) { tree ipfn = push_library_fn (get_identifier ("__integer_pack"), build_function_type_list (integer_type_node, integer_type_node, NULL_TREE), NULL_TREE, ECF_CONST); DECL_DECLARED_CONSTEXPR_P (ipfn) = true; set_decl_built_in_function (ipfn, BUILT_IN_FRONTEND, CP_BUILT_IN_INTEGER_PACK); } /* Set up the hash tables for template instantiations. */ void init_template_processing (void) { /* FIXME: enable sanitization (PR87847) */ decl_specializations = hash_table<spec_hasher>::create_ggc (37, false); type_specializations = hash_table<spec_hasher>::create_ggc (37, false); if (cxx_dialect >= cxx11) declare_integer_pack (); } /* Print stats about the template hash tables for -fstats. */ void print_template_statistics (void) { fprintf (stderr, "decl_specializations: size %ld, %ld elements, " "%f collisions\n", (long) decl_specializations->size (), (long) decl_specializations->elements (), decl_specializations->collisions ()); fprintf (stderr, "type_specializations: size %ld, %ld elements, " "%f collisions\n", (long) type_specializations->size (), (long) type_specializations->elements (), type_specializations->collisions ()); } #if CHECKING_P namespace selftest { /* Verify that build_non_dependent_expr () works, for various expressions, and that location wrappers don't affect the results. */ static void test_build_non_dependent_expr () { location_t loc = BUILTINS_LOCATION; /* Verify constants, without and with location wrappers. */ tree int_cst = build_int_cst (integer_type_node, 42); ASSERT_EQ (int_cst, build_non_dependent_expr (int_cst)); tree wrapped_int_cst = maybe_wrap_with_location (int_cst, loc); ASSERT_TRUE (location_wrapper_p (wrapped_int_cst)); ASSERT_EQ (wrapped_int_cst, build_non_dependent_expr (wrapped_int_cst)); tree string_lit = build_string (4, "foo"); TREE_TYPE (string_lit) = char_array_type_node; string_lit = fix_string_type (string_lit); ASSERT_EQ (string_lit, build_non_dependent_expr (string_lit)); tree wrapped_string_lit = maybe_wrap_with_location (string_lit, loc); ASSERT_TRUE (location_wrapper_p (wrapped_string_lit)); ASSERT_EQ (wrapped_string_lit, build_non_dependent_expr (wrapped_string_lit)); } /* Verify that type_dependent_expression_p () works correctly, even in the presence of location wrapper nodes. */ static void test_type_dependent_expression_p () { location_t loc = BUILTINS_LOCATION; tree name = get_identifier ("foo"); /* If no templates are involved, nothing is type-dependent. */ gcc_assert (!processing_template_decl); ASSERT_FALSE (type_dependent_expression_p (name)); ++processing_template_decl; /* Within a template, an unresolved name is always type-dependent. */ ASSERT_TRUE (type_dependent_expression_p (name)); /* Ensure it copes with NULL_TREE and errors. 
*/ ASSERT_FALSE (type_dependent_expression_p (NULL_TREE)); ASSERT_FALSE (type_dependent_expression_p (error_mark_node)); /* A USING_DECL in a template should be type-dependent, even if wrapped with a location wrapper (PR c++/83799). */ tree using_decl = build_lang_decl (USING_DECL, name, NULL_TREE); TREE_TYPE (using_decl) = integer_type_node; ASSERT_TRUE (type_dependent_expression_p (using_decl)); tree wrapped_using_decl = maybe_wrap_with_location (using_decl, loc); ASSERT_TRUE (location_wrapper_p (wrapped_using_decl)); ASSERT_TRUE (type_dependent_expression_p (wrapped_using_decl)); --processing_template_decl; } /* Run all of the selftests within this file. */ void cp_pt_c_tests () { test_build_non_dependent_expr (); test_type_dependent_expression_p (); } } // namespace selftest #endif /* #if CHECKING_P */ #include "gt-cp-pt.h"
omp_doacross.c
// RUN: %libomp-compile-and-run // REQUIRES: openmp-4.5 // XFAIL: gcc-4, gcc-5, clang-3.7, clang-3.8, icc-15, icc-16 #include <stdio.h> #include <stdlib.h> #include "omp_testsuite.h" #ifndef N #define N 750 #endif int test_doacross() { int i, j; // Allocate and zero out the matrix int *m = (int *)malloc(sizeof(int) * N * N); for (i = 0; i < N; ++i) { for (j = 0; j < N; ++j) { m[i * N + j] = 0; } } // Have first row and column be 0, 1, 2, 3, etc. for (i = 0; i < N; ++i) m[i * N] = i; for (j = 0; j < N; ++j) m[j] = j; // Perform wavefront which results in matrix: // 0 1 2 3 4 // 1 2 3 4 5 // 2 3 4 5 6 // 3 4 5 6 7 // 4 5 6 7 8 #pragma omp parallel shared(m) { int row, col; #pragma omp for ordered(2) for (row = 1; row < N; ++row) { for (col = 1; col < N; ++col) { #pragma omp ordered depend(sink : row - 1, col) depend(sink : row, col - 1) m[row * N + col] = m[(row - 1) * N + col] + m[row * N + (col - 1)] - m[(row - 1) * N + (col - 1)]; #pragma omp ordered depend(source) } } } // Check the bottom right element to see if iteration dependencies were held int retval = (m[(N - 1) * N + N - 1] == 2 * (N - 1)); free(m); return retval; } int main(int argc, char **argv) { int i; int num_failed = 0; if (omp_get_max_threads() < 2) omp_set_num_threads(4); for (i = 0; i < REPETITIONS; i++) { if (!test_doacross()) { num_failed++; } } return num_failed; }
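The test above exercises OpenMP 4.5 doacross dependences (ordered(2) with depend(sink)/depend(source)) on a 2-D wavefront. As a smaller illustration of the same mechanism, not part of the libomp test suite, here is a minimal 1-D sketch; it assumes any OpenMP 4.5 compiler, and each iteration waits for its predecessor before reading a[i-1]:

#include <stdio.h>

#define M 8

int main(void)
{
    int a[M];
    a[0] = 1;

    /* Doacross loop: iteration i may only proceed past depend(sink: i-1)
       after iteration i-1 has executed its depend(source) point. */
    #pragma omp parallel for ordered(1)
    for (int i = 1; i < M; ++i) {
        #pragma omp ordered depend(sink : i - 1)
        a[i] = a[i - 1] + 1;   /* consumes the value produced by iteration i-1 */
        #pragma omp ordered depend(source)
    }

    for (int i = 0; i < M; ++i)
        printf("%d ", a[i]);   /* prints: 1 2 3 4 5 6 7 8 */
    printf("\n");
    return 0;
}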
GB_binop__lt_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__lt_int64 // A.*B function (eWiseMult): GB_AemultB__lt_int64 // A*D function (colscale): GB_AxD__lt_int64 // D*A function (rowscale): GB_DxB__lt_int64 // C+=B function (dense accum): GB_Cdense_accumB__lt_int64 // C+=b function (dense accum): GB_Cdense_accumb__lt_int64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lt_int64 // C=scalar+B GB_bind1st__lt_int64 // C=scalar+B' GB_bind1st_tran__lt_int64 // C=A+scalar GB_bind2nd__lt_int64 // C=A'+scalar GB_bind2nd_tran__lt_int64 // C type: bool // A type: int64_t // B,b type: int64_t // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x < y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LT || GxB_NO_INT64 || GxB_NO_LT_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__lt_int64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__lt_int64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__lt_int64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__lt_int64 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__lt_int64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__lt_int64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__lt_int64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__lt_int64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t bij = Bx [p] ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__lt_int64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t aij = Ax [p] ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB_bind1st_tran__lt_int64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB_bind2nd_tran__lt_int64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
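Every generated kernel above specializes the single line defined by GB_BINOP, z = (x < y), into a different matrix shape. As a rough standalone sketch of what the bind2nd variant boils down to once the GraphBLAS scaffolding is stripped away (the function and variable names below are illustrative, not part of SuiteSparse):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Standalone analogue of the bind2nd kernel: Cx [p] = (Ax [p] < y). */
static void lt_bind2nd(bool *Cx, const int64_t *Ax, int64_t y,
                       int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = (Ax [p] < y) ;
    }
}

int main(void)
{
    int64_t Ax [5] = { 1, 5, 3, 9, 2 } ;
    bool Cx [5] ;
    lt_bind2nd (Cx, Ax, 4, 5, 2) ;
    for (int p = 0 ; p < 5 ; p++) printf ("%d ", (int) Cx [p]) ;
    printf ("\n") ;      /* expected: 1 0 1 0 1 */
    return (0) ;
}

Compiled with -fopenmp the loop runs in parallel; without it, the pragma is ignored and the result is the same.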
knucleotide-9.c
// The Computer Language Benchmarks Game // http://benchmarksgame.alioth.debian.org/ // // Contributed by Jeremy Zerfas // This controls the initial size used for the hash tables. #define INITIAL_HASH_TABLE_SIZE 64 // This controls the maximum length for each set of nucleotide sequence // frequencies and each nucleotide sequence count output by this program. #define MAXIMUM_OUTPUT_LENGTH 4096 #include <stdint.h> #include <stdlib.h> #include <stdio.h> #include <string.h> typedef struct ht_ht ht_ht; typedef struct ht_node ht_node; // intptr_t should be the native integer type on most sane systems. typedef intptr_t intnative_t; // The hash table implementation provided by simple_hash3.h doesn't // automatically grow hash tables (although the chained linked lists can grow // infinitely long) and it also doesn't provide any function for growing a hash // table so we create our own function for checking the hash table density and // growing the hash table if necessary. static void check_Hash_Table_Density_And_Grow_If_Necessary( ht_ht ** const hash_Table_To_Check){ if(ht_count(*hash_Table_To_Check) > (*hash_Table_To_Check)->size){ // Create a new grown_Hash_Table which is at least four times the size // of the current hash_Table_To_Check. ht_ht * grown_Hash_Table=ht_create(4 * (*hash_Table_To_Check)->size); // Copy all the ht_nodes from the current hash_Table_To_Check to the new // grown_Hash_Table. ht_node *HT_Node_Pointer=ht_first(*hash_Table_To_Check); for(intnative_t i=0; i<ht_count(*hash_Table_To_Check); i++){ ht_find_new(grown_Hash_Table, HT_Node_Pointer->key)->val= HT_Node_Pointer->val; HT_Node_Pointer=ht_next(*hash_Table_To_Check); } // Destroy the old hash_Table_To_Check and update its pointer to point // to the new grown_Hash_Table. ht_destroy(*hash_Table_To_Check); *hash_Table_To_Check=grown_Hash_Table; } } // Function to use when sorting ht_nodes with qsort() later. ht_nodes with // larger values will come first and in cases of identical values then ht_nodes // with smaller keys will come first. static int HT_Node_Compare(const void * const uncasted_Left_HT_Node, const void * const uncasted_Right_HT_Node){ const ht_node * left_HT_Node=uncasted_Left_HT_Node, * right_HT_Node=uncasted_Right_HT_Node; // Sort based on ht_node values. if(left_HT_Node->val < right_HT_Node->val) return 1; if(left_HT_Node->val > right_HT_Node->val) return -1; // If we got here then both items have the same value so then sort based on // key. if(left_HT_Node->key > right_HT_Node->key) return 1; else return -1; } // Macro to convert a nucleotide character to a code. Note that upper and lower // case ASCII letters only differ in the fifth bit from the right and we only // need the three least significant bits to differentiate the letters 'A', 'C', // 'G', and 'T'. Spaces in this array/string will never be used as long as // characters other than 'A', 'C', 'G', and 'T' aren't used. #define code_For_Nucleotide(nucleotide) (" \0 \1\3 \2"[nucleotide & 0x7]) // And one more macro to convert the codes back to nucleotide characters. #define nucleotide_For_Code(code) ("ACGT"[code & 0x3]) // Generate frequences for all nucleotide sequences in sequences that are of // length sequence_Length and then save it to output. static void generate_Frequencies_For_Sequences(const char * const sequences, const intnative_t sequences_Length, intnative_t sequence_Length, char * const output){ ht_ht * hash_Table=ht_create(INITIAL_HASH_TABLE_SIZE); // Add all the sequences of sequence_Length to hash_Table. 
uint64_t key=0; for(intnative_t i=0; i<sequences_Length; i++){ const uint64_t mask=((uint64_t)1<<2*sequence_Length)-1; key=(key<<2 & mask) | sequences[i]; if(i>=sequence_Length-1){ ht_find_new(hash_Table, key)->val++; check_Hash_Table_Density_And_Grow_If_Necessary(&hash_Table); } } // Create an array of ht_nodes from hash_Table. intnative_t HT_Nodes_Array_Size=hash_Table->items; ht_node * HT_Nodes_Array=malloc(HT_Nodes_Array_Size*sizeof(ht_node)); ht_node * HT_Node_Pointer=ht_first(hash_Table); for(intnative_t i=0; i<HT_Nodes_Array_Size; i++){ HT_Nodes_Array[i]=*HT_Node_Pointer; HT_Node_Pointer=ht_next(hash_Table); } ht_destroy(hash_Table); // Sort HT_Nodes_Array. qsort(HT_Nodes_Array, HT_Nodes_Array_Size, sizeof(ht_node), &HT_Node_Compare); // Print the frequencies for each nucleotide sequence. for(intnative_t output_Position=0, i=0; i<HT_Nodes_Array_Size; i++){ char nucleotide_Sequence[sequence_Length+1]; for(intnative_t j=sequence_Length-1; j>-1; j--){ nucleotide_Sequence[j]=nucleotide_For_Code(HT_Nodes_Array[i].key); HT_Nodes_Array[i].key>>=2; } nucleotide_Sequence[sequence_Length]='\0'; // Output the frequency for nucleotide_Sequence to output. output_Position+=snprintf(output+output_Position, MAXIMUM_OUTPUT_LENGTH-output_Position, "%s %.3f\n", nucleotide_Sequence, 100.0f*HT_Nodes_Array[i].val/sequences_Length); } free(HT_Nodes_Array); } // Generate a count for the number of times nucleotide_Sequence appears in // sequences and then save it to output. static void generate_Count_For_Sequence(const char * const sequences, const intnative_t sequences_Length, const char * const nucleotide_Sequence, char * const output){ const intnative_t nucleotide_Sequence_Length=strlen(nucleotide_Sequence); ht_ht * hash_Table=ht_create(INITIAL_HASH_TABLE_SIZE); uint64_t key=0; for(intnative_t i=0; i<sequences_Length; i++){ const uint64_t mask=((uint64_t)1<<2*nucleotide_Sequence_Length)-1; key=(key<<2 & mask) | sequences[i]; if(i>=nucleotide_Sequence_Length){ ht_find_new(hash_Table, key)->val++; check_Hash_Table_Density_And_Grow_If_Necessary(&hash_Table); } } // Generate key for the sequence. key=0; for(intnative_t i=0; i<nucleotide_Sequence_Length; i++) key=(key<<2) | code_For_Nucleotide(nucleotide_Sequence[i]); // Output the count for nucleotide_Sequence to output. intnative_t count=ht_find(hash_Table, key)->val; snprintf(output, MAXIMUM_OUTPUT_LENGTH, "%jd\t%s", (intmax_t)count, nucleotide_Sequence); ht_destroy(hash_Table); } int main(){ char buffer[4096]; // Find the start of the third nucleotide sequence. while(fgets(buffer, sizeof(buffer), stdin) && memcmp(">THREE", buffer, sizeof(">THREE")-1)); // Start with 1 MB of storage for reading in the nucleotide sequence and // grow exponentially. intnative_t nucleotide_Sequence_Capacity=1048576; intnative_t nucleotide_Sequence_Size=0; char * nucleotide_Sequence=malloc(nucleotide_Sequence_Capacity); // Start reading and encoding the third nucleotide sequence. while(fgets(buffer, sizeof(buffer), stdin) && buffer[0]!='>'){ for(intnative_t i=0; buffer[i]!='\0'; i++){ if(buffer[i]!='\n') nucleotide_Sequence[nucleotide_Sequence_Size++]= code_For_Nucleotide(buffer[i]); } // Make sure we still have enough memory allocated for any potential // nucleotides in the next line. if(nucleotide_Sequence_Capacity-nucleotide_Sequence_Size < sizeof(buffer)){ nucleotide_Sequence_Capacity*=2; nucleotide_Sequence=realloc(nucleotide_Sequence, nucleotide_Sequence_Capacity); } } // Free up any leftover memory. 
nucleotide_Sequence=realloc(nucleotide_Sequence, nucleotide_Sequence_Size); char output_Buffer[7][MAXIMUM_OUTPUT_LENGTH]; // Do the following functions in parallel. #pragma omp parallel sections { #pragma omp section { generate_Frequencies_For_Sequences(nucleotide_Sequence, nucleotide_Sequence_Size, 1, output_Buffer[0]); } #pragma omp section { generate_Frequencies_For_Sequences(nucleotide_Sequence, nucleotide_Sequence_Size, 2, output_Buffer[1]); } #pragma omp section { generate_Count_For_Sequence(nucleotide_Sequence, nucleotide_Sequence_Size, "GGT", output_Buffer[2]); } #pragma omp section { generate_Count_For_Sequence(nucleotide_Sequence, nucleotide_Sequence_Size, "GGTA", output_Buffer[3]); } #pragma omp section { generate_Count_For_Sequence(nucleotide_Sequence, nucleotide_Sequence_Size, "GGTATT", output_Buffer[4]); } #pragma omp section { generate_Count_For_Sequence(nucleotide_Sequence, nucleotide_Sequence_Size, "GGTATTTTAATT", output_Buffer[5]); } #pragma omp section { generate_Count_For_Sequence(nucleotide_Sequence, nucleotide_Sequence_Size, "GGTATTTTAATTTATAGT", output_Buffer[6]); } } for(intnative_t i=0; i<7; printf("%s\n", output_Buffer[i++])); free(nucleotide_Sequence); return 0; }
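The hashing above relies on packing each nucleotide into a 2-bit code and sliding a 2*k-bit key over the sequence. A minimal standalone sketch of that rolling-key idea, with the lookup-string macro replaced by an explicit switch and no hash table (it just counts one 3-mer), might look like this:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* 2-bit code per nucleotide: A=0, C=1, G=2, T=3 (same coding the program uses). */
static uint64_t code(char c){
   switch(c){
      case 'C': return 1;
      case 'G': return 2;
      case 'T': return 3;
      default:  return 0;   /* 'A' */
   }
}

int main(void){
   const char *seq="GGTATTGGT";
   const int k=3;                                     /* count 3-mers */
   const uint64_t mask=((uint64_t)1<<2*k)-1;          /* keep only the last k codes */
   uint64_t key=0, target=0;
   int count=0;

   /* Key for the pattern "GGT". */
   for(int i=0; i<k; i++)
      target=(target<<2) | code("GGT"[i]);

   /* Slide a 2*k-bit window over the sequence, exactly like the hashed keys above. */
   for(size_t i=0; i<strlen(seq); i++){
      key=(key<<2 & mask) | code(seq[i]);
      if(i>=(size_t)(k-1) && key==target)
         count++;
   }
   printf("GGT occurs %d times\n", count);            /* prints 2 */
   return 0;
}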
main.c
#include <nanvix/sys/perf.h>
#include <nanvix/sys/thread.h>
#include <nanvix/ulib.h>
#include <posix/stdint.h>
#include <kbench.h>
//#include <libgomp/src/omp.h>

#define NTHREADS_MAX (THREAD_MAX - 1) /**< Maximum Number of Working Threads */

/*============================================================================*
 * Benchmark Driver                                                           *
 *============================================================================*/

/**
 * @brief Fork-Join Benchmark
 *
 * Minimal stand-ins for the libgomp entry points that GCC emits calls to
 * when it lowers "#pragma omp parallel"; Nanvix has no libgomp, so these
 * stubs only trace that they were reached.
 */
void GOMP_parallel()
{
	uprintf("reinaldo reinaldo");
	return;
}

void GOMP_parallel_start() { return; }

void *Hello_rei(void *teste);

int __main2()
{
	// int i, k;
	// int id_thread[NTHREADS_MAX];
	// kthread_t tid[NTHREADS_MAX];

	#pragma omp parallel
	uprintf("Hello from thread %d\n", 1);

	// for (i = 0; i < NTHREADS_MAX; i++) {
	// 	kthread_create(&tid[i], Hello_rei, ((void*)( (intptr_t) i)));
	// }
	// for (k = 0; k < NTHREADS_MAX; k++) {
	// 	kthread_join(tid[k], NULL);
	// }

	return (0);
}

void *Hello_rei(void *teste)
{
	int id;

	id = (int)((intptr_t) teste);
	uprintf("Hello from thread %d\n", id);
	return NULL;
}
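The commented-out loop in __main2() sketches a fork/join over NTHREADS_MAX workers using Nanvix's kthread API. For comparison, the same pattern written with POSIX threads (assumed here purely so the sketch compiles and runs on an ordinary host; it is not how the Nanvix benchmark itself runs) looks like:

/* build: cc -pthread hello_forkjoin.c */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NTHREADS 4   /* stand-in for NTHREADS_MAX */

/* Same role as Hello_rei(): each worker prints its id. */
static void *hello(void *arg)
{
	int id = (int)(intptr_t) arg;
	printf("Hello from thread %d\n", id);
	return NULL;
}

int main(void)
{
	pthread_t tid[NTHREADS];

	/* Fork: one thread per worker, passing the index through the void* argument. */
	for (int i = 0; i < NTHREADS; i++)
		pthread_create(&tid[i], NULL, hello, (void *)(intptr_t) i);

	/* Join: wait for every worker before returning. */
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);

	return 0;
}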
add.c
//-------------------------------------------------------------------------// // // // This benchmark is an OpenMP C version of the NPB BT code. This OpenMP // // C version is developed by the Center for Manycore Programming at Seoul // // National University and derived from the OpenMP Fortran versions in // // "NPB3.3-OMP" developed by NAS. // // // // Permission to use, copy, distribute and modify this software for any // // purpose with or without fee is hereby granted. This software is // // provided "as is" without express or implied warranty. // // // // Information on NPB 3.3, including the technical report, the original // // specifications, source code, results and information on how to submit // // new results, is available at: // // // // http://www.nas.nasa.gov/Software/NPB/ // // // // Send comments or suggestions for this OpenMP C version to // // cmp@aces.snu.ac.kr // // // // Center for Manycore Programming // // School of Computer Science and Engineering // // Seoul National University // // Seoul 151-744, Korea // // // // E-mail: cmp@aces.snu.ac.kr // // // //-------------------------------------------------------------------------// //-------------------------------------------------------------------------// // Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, // // and Jaejin Lee // //-------------------------------------------------------------------------// #include "header.h" #include "timers.h" //--------------------------------------------------------------------- // addition of update to the vector u //--------------------------------------------------------------------- void add() { int i, j, k, m; //kai //int k15; // consistent_data(&k15, "int", 1); if (timeron) timer_start(t_add); #pragma omp parallel for default(shared) private(i,j,k,m) for (k = k15+1; k <= grid_points[2]-2; k++) { for (j = 1; j <= grid_points[1]-2; j++) { for (i = 1; i <= grid_points[0]-2; i++) { for (m = 0; m < 5; m++) { u[k][j][i][m] = u[k][j][i][m] + rhs[k][j][i][m]; } } } //kai k15 = 0; // printf("k15=%p\n",&k15); } if (timeron) timer_stop(t_add); }
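Stripped of the timer and instrumentation hooks, add() is a four-deep loop nest that accumulates rhs into u, parallelized over the outermost k index. The k15 variable above appears to belong to an external checkpoint/instrumentation harness (the "kai" comments), so the self-contained sketch below simply starts at k = 1 as the unmodified NPB loop does; the 8x8x8 grid extent is an illustrative assumption.

#include <stdio.h>

#define NX 8
#define NY 8
#define NZ 8

static double u[NZ][NY][NX][5], rhs[NZ][NY][NX][5];

//---------------------------------------------------------------------
// standalone analogue of add(): u += rhs over the grid interior
//---------------------------------------------------------------------
static void add_update()
{
  int i, j, k, m;

  #pragma omp parallel for default(shared) private(i,j,k,m)
  for (k = 1; k <= NZ-2; k++) {
    for (j = 1; j <= NY-2; j++) {
      for (i = 1; i <= NX-2; i++) {
        for (m = 0; m < 5; m++) {
          u[k][j][i][m] = u[k][j][i][m] + rhs[k][j][i][m];
        }
      }
    }
  }
}

int main()
{
  for (int k = 0; k < NZ; k++)
    for (int j = 0; j < NY; j++)
      for (int i = 0; i < NX; i++)
        for (int m = 0; m < 5; m++)
          rhs[k][j][i][m] = 1.0;

  add_update();
  printf("u[1][1][1][0] = %g\n", u[1][1][1][0]);  // prints 1
  return 0;
}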
parallelFor.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int sumaglobal=0;

int main(int argc, char *argv[]){
  /* Thread count is taken from the first command-line argument. */
  int numeroDeHilos=strtol(argv[1],NULL,10);
  long a[20],b[20];
  long resultado=0;
  int j;
  for(j=0;j<20;j++){
    a[j]=j*j;
    b[j]=j;
  }
  int i;
  /* Parallel sum of a[i]+b[i]; reduction(+) gives each thread a private copy
     of resultado and combines them at the end (the total is 2660). */
  #pragma omp parallel for num_threads(numeroDeHilos) \
      reduction(+ : resultado)
  for(i=0;i<20;i++)
    resultado+=a[i]+b[i];

  printf("The result of the operation is %ld\n", resultado);
  return 0;
}
MPC_SHA256.c
/* ============================================================================ Name : MPC_SHA256.c Author : Sobuno Version : 0.1 Description : MPC SHA256 for one block only ============================================================================ */ #include <stdbool.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include "shared.h" #include "omp.h" #define CH(e,f,g) ((e & f) ^ ((~e) & g)) int totalRandom = 0; int totalSha = 0; int totalSS = 0; int totalHash = 0; int NUM_ROUNDS = 136; uint32_t rand32() { uint32_t x; x = rand() & 0xff; x |= (rand() & 0xff) << 8; x |= (rand() & 0xff) << 16; x |= (rand() & 0xff) << 24; return x; } void printbits(uint32_t n) { if (n) { printbits(n >> 1); printf("%d", n & 1); } } void mpc_XOR(uint32_t x[3], uint32_t y[3], uint32_t z[3]) { z[0] = x[0] ^ y[0]; z[1] = x[1] ^ y[1]; z[2] = x[2] ^ y[2]; } void mpc_AND(uint32_t x[3], uint32_t y[3], uint32_t z[3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) { uint32_t r[3] = { getRandom32(randomness[0], *randCount), getRandom32(randomness[1], *randCount), getRandom32(randomness[2], *randCount)}; *randCount += 4; uint32_t t[3] = { 0 }; t[0] = (x[0] & y[1]) ^ (x[1] & y[0]) ^ (x[0] & y[0]) ^ r[0] ^ r[1]; t[1] = (x[1] & y[2]) ^ (x[2] & y[1]) ^ (x[1] & y[1]) ^ r[1] ^ r[2]; t[2] = (x[2] & y[0]) ^ (x[0] & y[2]) ^ (x[2] & y[2]) ^ r[2] ^ r[0]; z[0] = t[0]; z[1] = t[1]; z[2] = t[2]; views[0].y[*countY] = z[0]; views[1].y[*countY] = z[1]; views[2].y[*countY] = z[2]; (*countY)++; } void mpc_NEGATE(uint32_t x[3], uint32_t z[3]) { z[0] = ~x[0]; z[1] = ~x[1]; z[2] = ~x[2]; } void mpc_ADD(uint32_t x[3], uint32_t y[3], uint32_t z[3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) { uint32_t c[3] = { 0 }; uint32_t r[3] = { getRandom32(randomness[0], *randCount), getRandom32(randomness[1], *randCount), getRandom32(randomness[2], *randCount)}; *randCount += 4; uint8_t a[3], b[3]; uint8_t t; for(int i=0;i<31;i++) { a[0]=GETBIT(x[0]^c[0],i); a[1]=GETBIT(x[1]^c[1],i); a[2]=GETBIT(x[2]^c[2],i); b[0]=GETBIT(y[0]^c[0],i); b[1]=GETBIT(y[1]^c[1],i); b[2]=GETBIT(y[2]^c[2],i); t = (a[0]&b[1]) ^ (a[1]&b[0]) ^ GETBIT(r[1],i); SETBIT(c[0],i+1, t ^ (a[0]&b[0]) ^ GETBIT(c[0],i) ^ GETBIT(r[0],i)); t = (a[1]&b[2]) ^ (a[2]&b[1]) ^ GETBIT(r[2],i); SETBIT(c[1],i+1, t ^ (a[1]&b[1]) ^ GETBIT(c[1],i) ^ GETBIT(r[1],i)); t = (a[2]&b[0]) ^ (a[0]&b[2]) ^ GETBIT(r[0],i); SETBIT(c[2],i+1, t ^ (a[2]&b[2]) ^ GETBIT(c[2],i) ^ GETBIT(r[2],i)); } z[0]=x[0]^y[0]^c[0]; z[1]=x[1]^y[1]^c[1]; z[2]=x[2]^y[2]^c[2]; views[0].y[*countY] = c[0]; views[1].y[*countY] = c[1]; views[2].y[*countY] = c[2]; *countY += 1; } void mpc_ADDK(uint32_t x[3], uint32_t y, uint32_t z[3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) { uint32_t c[3] = { 0 }; uint32_t r[3] = { getRandom32(randomness[0], *randCount), getRandom32(randomness[1], *randCount), getRandom32(randomness[2], *randCount)}; *randCount += 4; uint8_t a[3], b[3]; uint8_t t; for(int i=0;i<31;i++) { a[0]=GETBIT(x[0]^c[0],i); a[1]=GETBIT(x[1]^c[1],i); a[2]=GETBIT(x[2]^c[2],i); b[0]=GETBIT(y^c[0],i); b[1]=GETBIT(y^c[1],i); b[2]=GETBIT(y^c[2],i); t = (a[0]&b[1]) ^ (a[1]&b[0]) ^ GETBIT(r[1],i); SETBIT(c[0],i+1, t ^ (a[0]&b[0]) ^ GETBIT(c[0],i) ^ GETBIT(r[0],i)); t = (a[1]&b[2]) ^ (a[2]&b[1]) ^ GETBIT(r[2],i); SETBIT(c[1],i+1, t ^ (a[1]&b[1]) ^ GETBIT(c[1],i) ^ GETBIT(r[1],i)); t = (a[2]&b[0]) ^ (a[0]&b[2]) ^ GETBIT(r[0],i); SETBIT(c[2],i+1, t ^ (a[2]&b[2]) ^ GETBIT(c[2],i) ^ GETBIT(r[2],i)); } 
z[0]=x[0]^y^c[0]; z[1]=x[1]^y^c[1]; z[2]=x[2]^y^c[2]; views[0].y[*countY] = c[0]; views[1].y[*countY] = c[1]; views[2].y[*countY] = c[2]; *countY += 1; } int sha256(unsigned char* result, unsigned char* input, int numBits) { uint32_t hA[8] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 }; if (numBits > 447) { printf("Input too long, aborting!"); return -1; } int chars = numBits >> 3; unsigned char* chunk = calloc(64, 1); //512 bits memcpy(chunk, input, chars); chunk[chars] = 0x80; //Last 8 chars used for storing length of input without padding, in big-endian. //Since we only care for one block, we are safe with just using last 9 bits and 0'ing the rest //chunk[60] = numBits >> 24; //chunk[61] = numBits >> 16; chunk[62] = numBits >> 8; chunk[63] = numBits; uint32_t w[64]; int i; for (i = 0; i < 16; i++) { w[i] = (chunk[i * 4] << 24) | (chunk[i * 4 + 1] << 16) | (chunk[i * 4 + 2] << 8) | chunk[i * 4 + 3]; } uint32_t s0, s1; for (i = 16; i < 64; i++) { s0 = RIGHTROTATE(w[i - 15], 7) ^ RIGHTROTATE(w[i - 15], 18) ^ (w[i - 15] >> 3); s1 = RIGHTROTATE(w[i - 2], 17) ^ RIGHTROTATE(w[i - 2], 19) ^ (w[i - 2] >> 10); w[i] = w[i - 16] + s0 + w[i - 7] + s1; } uint32_t a, b, c, d, e, f, g, h, temp1, temp2, maj; a = hA[0]; b = hA[1]; c = hA[2]; d = hA[3]; e = hA[4]; f = hA[5]; g = hA[6]; h = hA[7]; for (i = 0; i < 64; i++) { s1 = RIGHTROTATE(e,6) ^ RIGHTROTATE(e, 11) ^ RIGHTROTATE(e, 25); temp1 = h + s1 + CH(e, f, g) + k[i] + w[i]; s0 = RIGHTROTATE(a,2) ^ RIGHTROTATE(a, 13) ^ RIGHTROTATE(a, 22); maj = (a & (b ^ c)) ^ (b & c); temp2 = s0 + maj; h = g; g = f; f = e; e = d + temp1; d = c; c = b; b = a; a = temp1 + temp2; } hA[0] += a; hA[1] += b; hA[2] += c; hA[3] += d; hA[4] += e; hA[5] += f; hA[6] += g; hA[7] += h; for (i = 0; i < 8; i++) { result[i * 4] = (hA[i] >> 24); result[i * 4 + 1] = (hA[i] >> 16); result[i * 4 + 2] = (hA[i] >> 8); result[i * 4 + 3] = hA[i]; } return 0; } void mpc_RIGHTROTATE(uint32_t x[], int i, uint32_t z[]) { z[0] = RIGHTROTATE(x[0], i); z[1] = RIGHTROTATE(x[1], i); z[2] = RIGHTROTATE(x[2], i); } void mpc_RIGHTSHIFT(uint32_t x[3], int i, uint32_t z[3]) { z[0] = x[0] >> i; z[1] = x[1] >> i; z[2] = x[2] >> i; } void mpc_MAJ(uint32_t a[], uint32_t b[3], uint32_t c[3], uint32_t z[3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) { uint32_t t0[3]; uint32_t t1[3]; mpc_XOR(a, b, t0); mpc_XOR(a, c, t1); mpc_AND(t0, t1, z, randomness, randCount, views, countY); mpc_XOR(z, a, z); } void mpc_CH(uint32_t e[], uint32_t f[3], uint32_t g[3], uint32_t z[3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) { uint32_t t0[3]; //e & (f^g) ^ g mpc_XOR(f,g,t0); mpc_AND(e,t0,t0, randomness, randCount, views, countY); mpc_XOR(t0,g,z); } int mpc_sha256(unsigned char* results[3], unsigned char* inputs[3], int numBits, unsigned char *randomness[3], View views[3], int* countY) { if (numBits > 447) { printf("Input too long, aborting!"); return -1; } int* randCount = calloc(1, sizeof(int)); int chars = numBits >> 3; unsigned char* chunks[3]; uint32_t w[64][3]; for (int i = 0; i < 3; i++) { chunks[i] = calloc(64, 1); //512 bits memcpy(chunks[i], inputs[i], chars); chunks[i][chars] = 0x80; //Last 8 chars used for storing length of input without padding, in big-endian. 
//Since we only care for one block, we are safe with just using last 9 bits and 0'ing the rest //chunk[60] = numBits >> 24; //chunk[61] = numBits >> 16; chunks[i][62] = numBits >> 8; chunks[i][63] = numBits; memcpy(views[i].x, chunks[i], 64); for (int j = 0; j < 16; j++) { w[j][i] = (chunks[i][j * 4] << 24) | (chunks[i][j * 4 + 1] << 16) | (chunks[i][j * 4 + 2] << 8) | chunks[i][j * 4 + 3]; } free(chunks[i]); } uint32_t s0[3], s1[3]; uint32_t t0[3], t1[3]; for (int j = 16; j < 64; j++) { //s0[i] = RIGHTROTATE(w[i][j-15],7) ^ RIGHTROTATE(w[i][j-15],18) ^ (w[i][j-15] >> 3); mpc_RIGHTROTATE(w[j-15], 7, t0); mpc_RIGHTROTATE(w[j-15], 18, t1); mpc_XOR(t0, t1, t0); mpc_RIGHTSHIFT(w[j-15], 3, t1); mpc_XOR(t0, t1, s0); //s1[i] = RIGHTROTATE(w[i][j-2],17) ^ RIGHTROTATE(w[i][j-2],19) ^ (w[i][j-2] >> 10); mpc_RIGHTROTATE(w[j-2], 17, t0); mpc_RIGHTROTATE(w[j-2], 19, t1); mpc_XOR(t0, t1, t0); mpc_RIGHTSHIFT(w[j-2], 10, t1); mpc_XOR(t0, t1, s1); //w[i][j] = w[i][j-16]+s0[i]+w[i][j-7]+s1[i]; mpc_ADD(w[j-16], s0, t1, randomness, randCount, views, countY); mpc_ADD(w[j-7], t1, t1, randomness, randCount, views, countY); mpc_ADD(t1, s1, w[j], randomness, randCount, views, countY); } uint32_t a[3] = { hA[0],hA[0],hA[0] }; uint32_t b[3] = { hA[1],hA[1],hA[1] }; uint32_t c[3] = { hA[2],hA[2],hA[2] }; uint32_t d[3] = { hA[3],hA[3],hA[3] }; uint32_t e[3] = { hA[4],hA[4],hA[4] }; uint32_t f[3] = { hA[5],hA[5],hA[5] }; uint32_t g[3] = { hA[6],hA[6],hA[6] }; uint32_t h[3] = { hA[7],hA[7],hA[7] }; uint32_t temp1[3], temp2[3], maj[3]; for (int i = 0; i < 64; i++) { //s1 = RIGHTROTATE(e,6) ^ RIGHTROTATE(e,11) ^ RIGHTROTATE(e,25); mpc_RIGHTROTATE(e, 6, t0); mpc_RIGHTROTATE(e, 11, t1); mpc_XOR(t0, t1, t0); mpc_RIGHTROTATE(e, 25, t1); mpc_XOR(t0, t1, s1); //ch = (e & f) ^ ((~e) & g); //temp1 = h + s1 + CH(e,f,g) + k[i]+w[i]; //t0 = h + s1 mpc_ADD(h, s1, t0, randomness, randCount, views, countY); mpc_CH(e, f, g, t1, randomness, randCount, views, countY); //t1 = t0 + t1 (h+s1+ch) mpc_ADD(t0, t1, t1, randomness, randCount, views, countY); mpc_ADDK(t1, k[i], t1, randomness, randCount, views, countY); mpc_ADD(t1, w[i], temp1, randomness, randCount, views, countY); //s0 = RIGHTROTATE(a,2) ^ RIGHTROTATE(a,13) ^ RIGHTROTATE(a,22); mpc_RIGHTROTATE(a, 2, t0); mpc_RIGHTROTATE(a, 13, t1); mpc_XOR(t0, t1, t0); mpc_RIGHTROTATE(a, 22, t1); mpc_XOR(t0, t1, s0); mpc_MAJ(a, b, c, maj, randomness, randCount, views, countY); //temp2 = s0+maj; mpc_ADD(s0, maj, temp2, randomness, randCount, views, countY); memcpy(h, g, sizeof(uint32_t) * 3); memcpy(g, f, sizeof(uint32_t) * 3); memcpy(f, e, sizeof(uint32_t) * 3); //e = d+temp1; mpc_ADD(d, temp1, e, randomness, randCount, views, countY); memcpy(d, c, sizeof(uint32_t) * 3); memcpy(c, b, sizeof(uint32_t) * 3); memcpy(b, a, sizeof(uint32_t) * 3); //a = temp1+temp2; mpc_ADD(temp1, temp2, a, randomness, randCount, views, countY); } uint32_t hHa[8][3] = { { hA[0],hA[0],hA[0] }, { hA[1],hA[1],hA[1] }, { hA[2],hA[2],hA[2] }, { hA[3],hA[3],hA[3] }, { hA[4],hA[4],hA[4] }, { hA[5],hA[5],hA[5] }, { hA[6],hA[6],hA[6] }, { hA[7],hA[7],hA[7] } }; mpc_ADD(hHa[0], a, hHa[0], randomness, randCount, views, countY); mpc_ADD(hHa[1], b, hHa[1], randomness, randCount, views, countY); mpc_ADD(hHa[2], c, hHa[2], randomness, randCount, views, countY); mpc_ADD(hHa[3], d, hHa[3], randomness, randCount, views, countY); mpc_ADD(hHa[4], e, hHa[4], randomness, randCount, views, countY); mpc_ADD(hHa[5], f, hHa[5], randomness, randCount, views, countY); mpc_ADD(hHa[6], g, hHa[6], randomness, randCount, views, countY); 
mpc_ADD(hHa[7], h, hHa[7], randomness, randCount, views, countY); for (int i = 0; i < 8; i++) { mpc_RIGHTSHIFT(hHa[i], 24, t0); results[0][i * 4] = t0[0]; results[1][i * 4] = t0[1]; results[2][i * 4] = t0[2]; mpc_RIGHTSHIFT(hHa[i], 16, t0); results[0][i * 4 + 1] = t0[0]; results[1][i * 4 + 1] = t0[1]; results[2][i * 4 + 1] = t0[2]; mpc_RIGHTSHIFT(hHa[i], 8, t0); results[0][i * 4 + 2] = t0[0]; results[1][i * 4 + 2] = t0[1]; results[2][i * 4 + 2] = t0[2]; results[0][i * 4 + 3] = hHa[i][0]; results[1][i * 4 + 3] = hHa[i][1]; results[2][i * 4 + 3] = hHa[i][2]; } free(randCount); return 0; } int writeToFile(char filename[], void* data, int size, int numItems) { FILE *file; file = fopen(filename, "wb"); if (!file) { printf("Unable to open file!"); return 1; } fwrite(data, size, numItems, file); fclose(file); return 0; } int secretShare(unsigned char* input, int numBytes, unsigned char output[3][numBytes]) { if(RAND_bytes(output[0], numBytes) != 1) { printf("RAND_bytes failed crypto, aborting\n"); } if(RAND_bytes(output[1], numBytes) != 1) { printf("RAND_bytes failed crypto, aborting\n"); } for (int j = 0; j < numBytes; j++) { output[2][j] = input[j] ^ output[0][j] ^ output[1][j]; } return 0; } a commit(int numBytes,unsigned char shares[3][numBytes], unsigned char *randomness[3], View views[3]) { unsigned char* inputs[3]; inputs[0] = shares[0]; inputs[1] = shares[1]; inputs[2] = shares[2]; unsigned char* hashes[3]; hashes[0] = malloc(32); hashes[1] = malloc(32); hashes[2] = malloc(32); int* countY = calloc(1, sizeof(int)); mpc_sha256(hashes, inputs, numBytes * 8, randomness, views, countY); //Explicitly add y to view for(int i = 0; i<8; i++) { views[0].y[*countY] = (hashes[0][i * 4] << 24) | (hashes[0][i * 4 + 1] << 16) | (hashes[0][i * 4 + 2] << 8) | hashes[0][i * 4 + 3]; views[1].y[*countY] = (hashes[1][i * 4] << 24) | (hashes[1][i * 4 + 1] << 16) | (hashes[1][i * 4 + 2] << 8) | hashes[1][i * 4 + 3]; views[2].y[*countY] = (hashes[2][i * 4] << 24) | (hashes[2][i * 4 + 1] << 16) | (hashes[2][i * 4 + 2] << 8) | hashes[2][i * 4 + 3]; *countY += 1; } free(countY); free(hashes[0]); free(hashes[1]); free(hashes[2]); uint32_t* result1 = malloc(32); output(views[0], result1); uint32_t* result2 = malloc(32); output(views[1], result2); uint32_t* result3 = malloc(32); output(views[2], result3); a a; memcpy(a.yp[0], result1, 32); memcpy(a.yp[1], result2, 32); memcpy(a.yp[2], result3, 32); free(result1); free(result2); free(result3); return a; } z prove(int e, unsigned char keys[3][16], unsigned char rs[3][4], View views[3]) { z z; memcpy(z.ke, keys[e], 16); memcpy(z.ke1, keys[(e + 1) % 3], 16); z.ve = views[e]; z.ve1 = views[(e + 1) % 3]; memcpy(z.re, rs[e],4); memcpy(z.re1, rs[(e + 1) % 3],4); return z; } int main(void) { setbuf(stdout, NULL); srand((unsigned) time(NULL)); init_EVP(); openmp_thread_setup(); // unsigned char garbage[4]; if(RAND_bytes(garbage, 4) != 1) { printf("RAND_bytes failed crypto, aborting\n"); return 0; } printf("Iterations of SHA: %d\n", NUM_ROUNDS); unsigned char input[] = {'a','b','c','d','\0'}; int i = strlen(input) - 1; clock_t begin = clock(), delta, deltaA; unsigned char rs[NUM_ROUNDS][3][4]; unsigned char keys[NUM_ROUNDS][3][16]; a as[NUM_ROUNDS]; View localViews[NUM_ROUNDS][3]; int totalCrypto = 0; //Generating keys clock_t beginCrypto = clock(), deltaCrypto; if(RAND_bytes(keys, NUM_ROUNDS*3*16) != 1) { printf("RAND_bytes failed crypto, aborting\n"); return 0; } if(RAND_bytes(rs, NUM_ROUNDS*3*4) != 1) { printf("RAND_bytes failed crypto, aborting\n"); return 0; } deltaCrypto 
= clock() - beginCrypto; int inMilliCrypto = deltaCrypto * 1000 / CLOCKS_PER_SEC; totalCrypto = inMilliCrypto; //Sharing secrets clock_t beginSS = clock(), deltaSS; unsigned char shares[NUM_ROUNDS][3][i]; if(RAND_bytes(shares, NUM_ROUNDS*3*i) != 1) { printf("RAND_bytes failed crypto, aborting\n"); return 0; } #pragma omp parallel for for(int k=0; k<NUM_ROUNDS; k++) { for (int j = 0; j < i; j++) { shares[k][2][j] = input[j] ^ shares[k][0][j] ^ shares[k][1][j]; } } deltaSS = clock() - beginSS; int inMilli = deltaSS * 1000 / CLOCKS_PER_SEC; totalSS = inMilli; //Generating randomness clock_t beginRandom = clock(), deltaRandom; unsigned char *randomness[NUM_ROUNDS][3]; #pragma omp parallel for for(int k=0; k<NUM_ROUNDS; k++) { for(int j = 0; j<3; j++) { randomness[k][j] = malloc(2912*sizeof(unsigned char)); getAllRandomness(keys[k][j], randomness[k][j]); } } deltaRandom = clock() - beginRandom; inMilli = deltaRandom * 1000 / CLOCKS_PER_SEC; totalRandom = inMilli; //Running MPC-SHA2 clock_t beginSha = clock(), deltaSha; #pragma omp parallel for for(int k=0; k<NUM_ROUNDS; k++) { as[k] = commit(i, shares[k], randomness[k], localViews[k]); for(int j=0; j<3; j++) { free(randomness[k][j]); } } deltaSha = clock() - beginSha; inMilli = deltaSha * 1000 / CLOCKS_PER_SEC; totalSha = inMilli; //Committing clock_t beginHash = clock(), deltaHash; #pragma omp parallel for for(int k=0; k<NUM_ROUNDS; k++) { unsigned char hash1[SHA256_DIGEST_LENGTH]; H(keys[k][0], localViews[k][0], rs[k][0], &hash1); memcpy(as[k].h[0], &hash1, 32); H(keys[k][1], localViews[k][1], rs[k][1], &hash1); memcpy(as[k].h[1], &hash1, 32); H(keys[k][2], localViews[k][2], rs[k][2], &hash1); memcpy(as[k].h[2], &hash1, 32); } deltaHash = clock() - beginHash; inMilli = deltaHash * 1000 / CLOCKS_PER_SEC; totalHash += inMilli; deltaA = clock() - begin; int inMilliA = deltaA * 1000 / CLOCKS_PER_SEC; //Generating E clock_t beginE = clock(), deltaE; int es[NUM_ROUNDS]; uint32_t finalHash[8]; for (int j = 0; j < 8; j++) { finalHash[j] = as[0].yp[0][j]^as[0].yp[1][j]^as[0].yp[2][j]; } H3(finalHash, as, NUM_ROUNDS, es); deltaE = clock() - beginE; int inMilliE = deltaE * 1000 / CLOCKS_PER_SEC; //Packing Z clock_t beginZ = clock(), deltaZ; z* zs = malloc(sizeof(z)*NUM_ROUNDS); #pragma omp parallel for for(int i = 0; i<NUM_ROUNDS; i++) { zs[i] = prove(es[i],keys[i],rs[i], localViews[i]); } deltaZ = clock() - beginZ; int inMilliZ = deltaZ * 1000 / CLOCKS_PER_SEC; //Writing to file clock_t beginWrite = clock(); FILE *file; char outputFile[3*sizeof(int) + 8]; sprintf(outputFile, "out%i.bin", NUM_ROUNDS); file = fopen(outputFile, "wb"); if (!file) { printf("Unable to open file!"); return 1; } fwrite(as, sizeof(a), NUM_ROUNDS, file); fwrite(zs, sizeof(z), NUM_ROUNDS, file); fclose(file); clock_t deltaWrite = clock()-beginWrite; free(zs); int inMilliWrite = deltaWrite * 1000 / CLOCKS_PER_SEC; delta = clock() - begin; inMilli = delta * 1000 / CLOCKS_PER_SEC; int sumOfParts = 0; printf("Generating A: %ju\n", (uintmax_t)inMilliA); printf(" Generating keys: %ju\n", (uintmax_t)totalCrypto); sumOfParts += totalCrypto; printf(" Generating randomness: %ju\n", (uintmax_t)totalRandom); sumOfParts += totalRandom; printf(" Sharing secrets: %ju\n", (uintmax_t)totalSS); sumOfParts += totalSS; printf(" Running MPC-SHA2: %ju\n", (uintmax_t)totalSha); sumOfParts += totalSha; printf(" Committing: %ju\n", (uintmax_t)totalHash); sumOfParts += totalHash; printf(" *Accounted for*: %ju\n", (uintmax_t)sumOfParts); printf("Generating E: %ju\n", (uintmax_t)inMilliE); printf("Packing 
Z: %ju\n", (uintmax_t)inMilliZ); printf("Writing file: %ju\n", (uintmax_t)inMilliWrite); printf("Total: %d\n",inMilli); printf("\n"); printf("Proof output to file %s", outputFile); openmp_thread_cleanup(); cleanup_EVP(); return EXIT_SUCCESS; }
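commit() builds on a (2,3) XOR secret sharing of the input: two shares are random and the third is the input XORed with both, so any two shares on their own reveal nothing about the input while all three XOR back to it, and XOR gates can be evaluated share-by-share as in mpc_XOR(). A minimal standalone sketch of that sharing and reconstruction (the names share32/share_xor are illustrative, and rand() is used purely for illustration; the prover above uses OpenSSL's RAND_bytes):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Split one 32-bit word into three XOR shares, mirroring secretShare():
   share[2] = x ^ share[0] ^ share[1]. Not cryptographically random here. */
void share32(uint32_t x, uint32_t share[3]) {
	share[0] = (uint32_t) rand();
	share[1] = (uint32_t) rand();
	share[2] = x ^ share[0] ^ share[1];
}

/* XOR of two shared values is computed locally per party, as in mpc_XOR(). */
void share_xor(uint32_t x[3], uint32_t y[3], uint32_t z[3]) {
	z[0] = x[0] ^ y[0];
	z[1] = x[1] ^ y[1];
	z[2] = x[2] ^ y[2];
}

int main(void) {
	uint32_t a = 0xCAFEBABE, b = 0x12345678;
	uint32_t sa[3], sb[3], sz[3];

	share32(a, sa);
	share32(b, sb);
	share_xor(sa, sb, sz);

	/* Reconstruction: XOR the three shares back together. */
	uint32_t z = sz[0] ^ sz[1] ^ sz[2];
	printf("a ^ b         = %08x\n", (unsigned) (a ^ b));
	printf("reconstructed = %08x\n", (unsigned) z);   /* the two lines match */
	return 0;
}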
implicit_blender.c
/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * The Original Code is Copyright (C) Blender Foundation * All rights reserved. */ /** \file * \ingroup bph */ #include "implicit.h" #ifdef IMPLICIT_SOLVER_BLENDER # include "MEM_guardedalloc.h" # include "DNA_meshdata_types.h" # include "DNA_object_force_types.h" # include "DNA_object_types.h" # include "DNA_scene_types.h" # include "DNA_texture_types.h" # include "BLI_math.h" # include "BLI_utildefines.h" # include "BKE_cloth.h" # include "BKE_collision.h" # include "BKE_effect.h" # include "BPH_mass_spring.h" # ifdef __GNUC__ # pragma GCC diagnostic ignored "-Wtype-limits" # endif # ifdef _OPENMP # define CLOTH_OPENMP_LIMIT 512 # endif //#define DEBUG_TIME # ifdef DEBUG_TIME # include "PIL_time.h" # endif static float I[3][3] = {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}}; static float ZERO[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}}; # if 0 # define C99 # ifdef C99 # defineDO_INLINE inline # else # defineDO_INLINE static # endif # endif /* if 0 */ struct Cloth; ////////////////////////////////////////// /* fast vector / matrix library, enhancements are welcome :) -dg */ ///////////////////////////////////////// /* DEFINITIONS */ typedef float lfVector[3]; typedef struct fmatrix3x3 { float m[3][3]; /* 3x3 matrix */ unsigned int c, r; /* column and row number */ /* int pinned; // is this vertex allowed to move? 
*/ float n1, n2, n3; /* three normal vectors for collision constrains */ unsigned int vcount; /* vertex count */ unsigned int scount; /* spring count */ } fmatrix3x3; /////////////////////////// // float[3] vector /////////////////////////// /* simple vector code */ /* STATUS: verified */ DO_INLINE void mul_fvector_S(float to[3], float from[3], float scalar) { to[0] = from[0] * scalar; to[1] = from[1] * scalar; to[2] = from[2] * scalar; } /* simple v^T * v product ("outer product") */ /* STATUS: HAS TO BE verified (*should* work) */ DO_INLINE void mul_fvectorT_fvector(float to[3][3], float vectorA[3], float vectorB[3]) { mul_fvector_S(to[0], vectorB, vectorA[0]); mul_fvector_S(to[1], vectorB, vectorA[1]); mul_fvector_S(to[2], vectorB, vectorA[2]); } /* simple v^T * v product with scalar ("outer product") */ /* STATUS: HAS TO BE verified (*should* work) */ DO_INLINE void mul_fvectorT_fvectorS(float to[3][3], float vectorA[3], float vectorB[3], float aS) { mul_fvectorT_fvector(to, vectorA, vectorB); mul_fvector_S(to[0], to[0], aS); mul_fvector_S(to[1], to[1], aS); mul_fvector_S(to[2], to[2], aS); } # if 0 /* printf vector[3] on console: for debug output */ static void print_fvector(float m3[3]) { printf("%f\n%f\n%f\n\n", m3[0], m3[1], m3[2]); } /////////////////////////// // long float vector float (*)[3] /////////////////////////// /* print long vector on console: for debug output */ DO_INLINE void print_lfvector(float (*fLongVector)[3], unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { print_fvector(fLongVector[i]); } } # endif /* create long vector */ DO_INLINE lfVector *create_lfvector(unsigned int verts) { /* TODO: check if memory allocation was successful */ return (lfVector *)MEM_callocN(verts * sizeof(lfVector), "cloth_implicit_alloc_vector"); // return (lfVector *)cloth_aligned_malloc(&MEMORY_BASE, verts * sizeof(lfVector)); } /* delete long vector */ DO_INLINE void del_lfvector(float (*fLongVector)[3]) { if (fLongVector != NULL) { MEM_freeN(fLongVector); // cloth_aligned_free(&MEMORY_BASE, fLongVector); } } /* copy long vector */ DO_INLINE void cp_lfvector(float (*to)[3], float (*from)[3], unsigned int verts) { memcpy(to, from, verts * sizeof(lfVector)); } /* init long vector with float[3] */ DO_INLINE void init_lfvector(float (*fLongVector)[3], float vector[3], unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { copy_v3_v3(fLongVector[i], vector); } } /* zero long vector with float[3] */ DO_INLINE void zero_lfvector(float (*to)[3], unsigned int verts) { memset(to, 0.0f, verts * sizeof(lfVector)); } /* multiply long vector with scalar*/ DO_INLINE void mul_lfvectorS(float (*to)[3], float (*fLongVector)[3], float scalar, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { mul_fvector_S(to[i], fLongVector[i], scalar); } } /* multiply long vector with scalar*/ /* A -= B * float */ DO_INLINE void submul_lfvectorS(float (*to)[3], float (*fLongVector)[3], float scalar, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECSUBMUL(to[i], fLongVector[i], scalar); } } /* dot product for big vector */ DO_INLINE float dot_lfvector(float (*fLongVectorA)[3], float (*fLongVectorB)[3], unsigned int verts) { long i = 0; float temp = 0.0; // XXX brecht, disabled this for now (first schedule line was already disabled), // due to non-commutative nature of floating point ops this makes the sim give // different results each time you run it! 
// schedule(guided, 2) //#pragma omp parallel for reduction(+: temp) if (verts > CLOTH_OPENMP_LIMIT) for (i = 0; i < (long)verts; i++) { temp += dot_v3v3(fLongVectorA[i], fLongVectorB[i]); } return temp; } /* A = B + C --> for big vector */ DO_INLINE void add_lfvector_lfvector(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { add_v3_v3v3(to[i], fLongVectorA[i], fLongVectorB[i]); } } /* A = B + C * float --> for big vector */ DO_INLINE void add_lfvector_lfvectorS(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], float bS, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECADDS(to[i], fLongVectorA[i], fLongVectorB[i], bS); } } /* A = B * float + C * float --> for big vector */ DO_INLINE void add_lfvectorS_lfvectorS(float (*to)[3], float (*fLongVectorA)[3], float aS, float (*fLongVectorB)[3], float bS, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECADDSS(to[i], fLongVectorA[i], aS, fLongVectorB[i], bS); } } /* A = B - C * float --> for big vector */ DO_INLINE void sub_lfvector_lfvectorS(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], float bS, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECSUBS(to[i], fLongVectorA[i], fLongVectorB[i], bS); } } /* A = B - C --> for big vector */ DO_INLINE void sub_lfvector_lfvector(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { sub_v3_v3v3(to[i], fLongVectorA[i], fLongVectorB[i]); } } /////////////////////////// // 3x3 matrix /////////////////////////// # if 0 /* printf 3x3 matrix on console: for debug output */ static void print_fmatrix(float m3[3][3]) { printf("%f\t%f\t%f\n", m3[0][0], m3[0][1], m3[0][2]); printf("%f\t%f\t%f\n", m3[1][0], m3[1][1], m3[1][2]); printf("%f\t%f\t%f\n\n", m3[2][0], m3[2][1], m3[2][2]); } static void print_sparse_matrix(fmatrix3x3 *m) { if (m) { unsigned int i; for (i = 0; i < m[0].vcount + m[0].scount; i++) { printf("%d:\n", i); print_fmatrix(m[i].m); } } } # endif # if 0 static void print_lvector(lfVector *v, int numverts) { int i; for (i = 0; i < numverts; i++) { if (i > 0) { printf("\n"); } printf("%f,\n", v[i][0]); printf("%f,\n", v[i][1]); printf("%f,\n", v[i][2]); } } # endif # if 0 static void print_bfmatrix(fmatrix3x3 *m) { int tot = m[0].vcount + m[0].scount; int size = m[0].vcount * 3; float *t = MEM_callocN(sizeof(float) * size * size, "bfmatrix"); int q, i, j; for (q = 0; q < tot; q++) { int k = 3 * m[q].r; int l = 3 * m[q].c; for (j = 0; j < 3; j++) { for (i = 0; i < 3; i++) { // if (t[k + i + (l + j) * size] != 0.0f) { // printf("warning: overwriting value at %d, %d\n", m[q].r, m[q].c); // } if (k == l) { t[k + i + (k + j) * size] += m[q].m[i][j]; } else { t[k + i + (l + j) * size] += m[q].m[i][j]; t[l + j + (k + i) * size] += m[q].m[j][i]; } } } } for (j = 0; j < size; j++) { if (j > 0 && j % 3 == 0) { printf("\n"); } for (i = 0; i < size; i++) { if (i > 0 && i % 3 == 0) { printf(" "); } implicit_print_matrix_elem(t[i + j * size]); } printf("\n"); } MEM_freeN(t); } # endif /* copy 3x3 matrix */ DO_INLINE void cp_fmatrix(float to[3][3], float from[3][3]) { // memcpy(to, from, sizeof (float) * 9); copy_v3_v3(to[0], from[0]); copy_v3_v3(to[1], from[1]); copy_v3_v3(to[2], from[2]); } /* copy 3x3 matrix */ DO_INLINE void initdiag_fmatrixS(float to[3][3], float aS) { cp_fmatrix(to, ZERO); to[0][0] = aS; to[1][1] = aS; 
to[2][2] = aS; } # if 0 /* calculate determinant of 3x3 matrix */ DO_INLINE float det_fmatrix(float m[3][3]) { return m[0][0] * m[1][1] * m[2][2] + m[1][0] * m[2][1] * m[0][2] + m[0][1] * m[1][2] * m[2][0] - m[0][0] * m[1][2] * m[2][1] - m[0][1] * m[1][0] * m[2][2] - m[2][0] * m[1][1] * m[0][2]; } DO_INLINE void inverse_fmatrix(float to[3][3], float from[3][3]) { unsigned int i, j; float d; if ((d = det_fmatrix(from)) == 0) { printf("can't build inverse"); exit(0); } for (i = 0; i < 3; i++) { for (j = 0; j < 3; j++) { int i1 = (i + 1) % 3; int i2 = (i + 2) % 3; int j1 = (j + 1) % 3; int j2 = (j + 2) % 3; /** Reverse indexes i&j to take transpose. */ to[j][i] = (from[i1][j1] * from[i2][j2] - from[i1][j2] * from[i2][j1]) / d; /** * <pre> * if (i == j) { * to[i][j] = 1.0f / from[i][j]; * } * else { * to[i][j] = 0; * } * </pre> */ } } } # endif /* 3x3 matrix multiplied by a scalar */ /* STATUS: verified */ DO_INLINE void mul_fmatrix_S(float matrix[3][3], float scalar) { mul_fvector_S(matrix[0], matrix[0], scalar); mul_fvector_S(matrix[1], matrix[1], scalar); mul_fvector_S(matrix[2], matrix[2], scalar); } /* a vector multiplied by a 3x3 matrix */ /* STATUS: verified */ DO_INLINE void mul_fvector_fmatrix(float *to, float *from, float matrix[3][3]) { to[0] = matrix[0][0] * from[0] + matrix[1][0] * from[1] + matrix[2][0] * from[2]; to[1] = matrix[0][1] * from[0] + matrix[1][1] * from[1] + matrix[2][1] * from[2]; to[2] = matrix[0][2] * from[0] + matrix[1][2] * from[1] + matrix[2][2] * from[2]; } /* 3x3 matrix multiplied by a vector */ /* STATUS: verified */ DO_INLINE void mul_fmatrix_fvector(float *to, float matrix[3][3], float from[3]) { to[0] = dot_v3v3(matrix[0], from); to[1] = dot_v3v3(matrix[1], from); to[2] = dot_v3v3(matrix[2], from); } /* 3x3 matrix addition with 3x3 matrix */ DO_INLINE void add_fmatrix_fmatrix(float to[3][3], float matrixA[3][3], float matrixB[3][3]) { add_v3_v3v3(to[0], matrixA[0], matrixB[0]); add_v3_v3v3(to[1], matrixA[1], matrixB[1]); add_v3_v3v3(to[2], matrixA[2], matrixB[2]); } /* A -= B*x + C*y (3x3 matrix sub-addition with 3x3 matrix) */ DO_INLINE void subadd_fmatrixS_fmatrixS( float to[3][3], float matrixA[3][3], float aS, float matrixB[3][3], float bS) { VECSUBADDSS(to[0], matrixA[0], aS, matrixB[0], bS); VECSUBADDSS(to[1], matrixA[1], aS, matrixB[1], bS); VECSUBADDSS(to[2], matrixA[2], aS, matrixB[2], bS); } /* A = B - C (3x3 matrix subtraction with 3x3 matrix) */ DO_INLINE void sub_fmatrix_fmatrix(float to[3][3], float matrixA[3][3], float matrixB[3][3]) { sub_v3_v3v3(to[0], matrixA[0], matrixB[0]); sub_v3_v3v3(to[1], matrixA[1], matrixB[1]); sub_v3_v3v3(to[2], matrixA[2], matrixB[2]); } ///////////////////////////////////////////////////////////////// // special functions ///////////////////////////////////////////////////////////////// /* 3x3 matrix multiplied+added by a vector */ /* STATUS: verified */ DO_INLINE void muladd_fmatrix_fvector(float to[3], float matrix[3][3], float from[3]) { to[0] += dot_v3v3(matrix[0], from); to[1] += dot_v3v3(matrix[1], from); to[2] += dot_v3v3(matrix[2], from); } DO_INLINE void muladd_fmatrixT_fvector(float to[3], float matrix[3][3], float from[3]) { to[0] += matrix[0][0] * from[0] + matrix[1][0] * from[1] + matrix[2][0] * from[2]; to[1] += matrix[0][1] * from[0] + matrix[1][1] * from[1] + matrix[2][1] * from[2]; to[2] += matrix[0][2] * from[0] + matrix[1][2] * from[1] + matrix[2][2] * from[2]; } BLI_INLINE void outerproduct(float r[3][3], const float a[3], const float b[3]) { mul_v3_v3fl(r[0], a, b[0]); mul_v3_v3fl(r[1], a, 
b[1]); mul_v3_v3fl(r[2], a, b[2]); } BLI_INLINE void cross_m3_v3m3(float r[3][3], const float v[3], float m[3][3]) { cross_v3_v3v3(r[0], v, m[0]); cross_v3_v3v3(r[1], v, m[1]); cross_v3_v3v3(r[2], v, m[2]); } BLI_INLINE void cross_v3_identity(float r[3][3], const float v[3]) { r[0][0] = 0.0f; r[1][0] = v[2]; r[2][0] = -v[1]; r[0][1] = -v[2]; r[1][1] = 0.0f; r[2][1] = v[0]; r[0][2] = v[1]; r[1][2] = -v[0]; r[2][2] = 0.0f; } BLI_INLINE void madd_m3_m3fl(float r[3][3], float m[3][3], float f) { r[0][0] += m[0][0] * f; r[0][1] += m[0][1] * f; r[0][2] += m[0][2] * f; r[1][0] += m[1][0] * f; r[1][1] += m[1][1] * f; r[1][2] += m[1][2] * f; r[2][0] += m[2][0] * f; r[2][1] += m[2][1] * f; r[2][2] += m[2][2] * f; } ///////////////////////////////////////////////////////////////// /////////////////////////// // SPARSE SYMMETRIC big matrix with 3x3 matrix entries /////////////////////////// /* printf a big matrix on console: for debug output */ # if 0 static void print_bfmatrix(fmatrix3x3 *m3) { unsigned int i = 0; for (i = 0; i < m3[0].vcount + m3[0].scount; i++) { print_fmatrix(m3[i].m); } } # endif BLI_INLINE void init_fmatrix(fmatrix3x3 *matrix, int r, int c) { matrix->r = r; matrix->c = c; } /* create big matrix */ DO_INLINE fmatrix3x3 *create_bfmatrix(unsigned int verts, unsigned int springs) { // TODO: check if memory allocation was successful */ fmatrix3x3 *temp = (fmatrix3x3 *)MEM_callocN(sizeof(fmatrix3x3) * (verts + springs), "cloth_implicit_alloc_matrix"); int i; temp[0].vcount = verts; temp[0].scount = springs; /* vertex part of the matrix is diagonal blocks */ for (i = 0; i < verts; i++) { init_fmatrix(temp + i, i, i); } return temp; } /* delete big matrix */ DO_INLINE void del_bfmatrix(fmatrix3x3 *matrix) { if (matrix != NULL) { MEM_freeN(matrix); } } /* copy big matrix */ DO_INLINE void cp_bfmatrix(fmatrix3x3 *to, fmatrix3x3 *from) { // TODO bounds checking memcpy(to, from, sizeof(fmatrix3x3) * (from[0].vcount + from[0].scount)); } /* init big matrix */ // slow in parallel DO_INLINE void init_bfmatrix(fmatrix3x3 *matrix, float m3[3][3]) { unsigned int i; for (i = 0; i < matrix[0].vcount + matrix[0].scount; i++) { cp_fmatrix(matrix[i].m, m3); } } /* init the diagonal of big matrix */ // slow in parallel DO_INLINE void initdiag_bfmatrix(fmatrix3x3 *matrix, float m3[3][3]) { unsigned int i, j; float tmatrix[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}}; for (i = 0; i < matrix[0].vcount; i++) { cp_fmatrix(matrix[i].m, m3); } for (j = matrix[0].vcount; j < matrix[0].vcount + matrix[0].scount; j++) { cp_fmatrix(matrix[j].m, tmatrix); } } /* SPARSE SYMMETRIC multiply big matrix with long vector*/ /* STATUS: verified */ DO_INLINE void mul_bfmatrix_lfvector(float (*to)[3], fmatrix3x3 *from, lfVector *fLongVector) { unsigned int vcount = from[0].vcount; lfVector *temp = create_lfvector(vcount); zero_lfvector(to, vcount); # pragma omp parallel sections if (vcount > CLOTH_OPENMP_LIMIT) { # pragma omp section { for (unsigned int i = from[0].vcount; i < from[0].vcount + from[0].scount; i++) { /* This is the lower triangle of the sparse matrix, * therefore multiplication occurs with transposed submatrices. 
*/ muladd_fmatrixT_fvector(to[from[i].c], from[i].m, fLongVector[from[i].r]); } } # pragma omp section { for (unsigned int i = 0; i < from[0].vcount + from[0].scount; i++) { muladd_fmatrix_fvector(temp[from[i].r], from[i].m, fLongVector[from[i].c]); } } } add_lfvector_lfvector(to, to, temp, from[0].vcount); del_lfvector(temp); } /* SPARSE SYMMETRIC sub big matrix with big matrix*/ /* A -= B * float + C * float --> for big matrix */ /* VERIFIED */ DO_INLINE void subadd_bfmatrixS_bfmatrixS( fmatrix3x3 *to, fmatrix3x3 *from, float aS, fmatrix3x3 *matrix, float bS) { unsigned int i = 0; /* process diagonal elements */ for (i = 0; i < matrix[0].vcount + matrix[0].scount; i++) { subadd_fmatrixS_fmatrixS(to[i].m, from[i].m, aS, matrix[i].m, bS); } } /////////////////////////////////////////////////////////////////// // simulator start /////////////////////////////////////////////////////////////////// typedef struct Implicit_Data { /* inputs */ fmatrix3x3 *bigI; /* identity (constant) */ fmatrix3x3 *tfm; /* local coordinate transform */ fmatrix3x3 *M; /* masses */ lfVector *F; /* forces */ fmatrix3x3 *dFdV, *dFdX; /* force jacobians */ int num_blocks; /* number of off-diagonal blocks (springs) */ /* motion state data */ lfVector *X, *Xnew; /* positions */ lfVector *V, *Vnew; /* velocities */ /* internal solver data */ lfVector *B; /* B for A*dV = B */ fmatrix3x3 *A; /* A for A*dV = B */ lfVector *dV; /* velocity change (solution of A*dV = B) */ lfVector *z; /* target velocity in constrained directions */ fmatrix3x3 *S; /* filtering matrix for constraints */ fmatrix3x3 *P, *Pinv; /* pre-conditioning matrix */ } Implicit_Data; Implicit_Data *BPH_mass_spring_solver_create(int numverts, int numsprings) { Implicit_Data *id = (Implicit_Data *)MEM_callocN(sizeof(Implicit_Data), "implicit vecmat"); /* process diagonal elements */ id->tfm = create_bfmatrix(numverts, 0); id->A = create_bfmatrix(numverts, numsprings); id->dFdV = create_bfmatrix(numverts, numsprings); id->dFdX = create_bfmatrix(numverts, numsprings); id->S = create_bfmatrix(numverts, 0); id->Pinv = create_bfmatrix(numverts, numsprings); id->P = create_bfmatrix(numverts, numsprings); id->bigI = create_bfmatrix(numverts, numsprings); // TODO 0 springs id->M = create_bfmatrix(numverts, numsprings); id->X = create_lfvector(numverts); id->Xnew = create_lfvector(numverts); id->V = create_lfvector(numverts); id->Vnew = create_lfvector(numverts); id->F = create_lfvector(numverts); id->B = create_lfvector(numverts); id->dV = create_lfvector(numverts); id->z = create_lfvector(numverts); initdiag_bfmatrix(id->bigI, I); return id; } void BPH_mass_spring_solver_free(Implicit_Data *id) { del_bfmatrix(id->tfm); del_bfmatrix(id->A); del_bfmatrix(id->dFdV); del_bfmatrix(id->dFdX); del_bfmatrix(id->S); del_bfmatrix(id->P); del_bfmatrix(id->Pinv); del_bfmatrix(id->bigI); del_bfmatrix(id->M); del_lfvector(id->X); del_lfvector(id->Xnew); del_lfvector(id->V); del_lfvector(id->Vnew); del_lfvector(id->F); del_lfvector(id->B); del_lfvector(id->dV); del_lfvector(id->z); MEM_freeN(id); } /* ==== Transformation from/to root reference frames ==== */ BLI_INLINE void world_to_root_v3(Implicit_Data *data, int index, float r[3], const float v[3]) { copy_v3_v3(r, v); mul_transposed_m3_v3(data->tfm[index].m, r); } BLI_INLINE void root_to_world_v3(Implicit_Data *data, int index, float r[3], const float v[3]) { mul_v3_m3v3(r, data->tfm[index].m, v); } BLI_INLINE void world_to_root_m3(Implicit_Data *data, int index, float r[3][3], float m[3][3]) { float trot[3][3]; 
copy_m3_m3(trot, data->tfm[index].m); transpose_m3(trot); mul_m3_m3m3(r, trot, m); } BLI_INLINE void root_to_world_m3(Implicit_Data *data, int index, float r[3][3], float m[3][3]) { mul_m3_m3m3(r, data->tfm[index].m, m); } /* ================================ */ DO_INLINE void filter(lfVector *V, fmatrix3x3 *S) { unsigned int i = 0; for (i = 0; i < S[0].vcount; i++) { mul_m3_v3(S[i].m, V[S[i].r]); } } /* this version of the CG algorithm does not work very well with partial constraints * (where S has non-zero elements). */ # if 0 static int cg_filtered(lfVector *ldV, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S) { // Solves for unknown X in equation AX=B unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100; float conjgrad_epsilon = 0.0001f /* , conjgrad_lasterror=0 */ /* UNUSED */; lfVector *q, *d, *tmp, *r; float s, starget, a, s_prev; unsigned int numverts = lA[0].vcount; q = create_lfvector(numverts); d = create_lfvector(numverts); tmp = create_lfvector(numverts); r = create_lfvector(numverts); // zero_lfvector(ldV, CLOTHPARTICLES); filter(ldV, S); add_lfvector_lfvector(ldV, ldV, z, numverts); // r = B - Mul(tmp, A, X); // just use B if X known to be zero cp_lfvector(r, lB, numverts); mul_bfmatrix_lfvector(tmp, lA, ldV); sub_lfvector_lfvector(r, r, tmp, numverts); filter(r, S); cp_lfvector(d, r, numverts); s = dot_lfvector(r, r, numverts); starget = s * sqrtf(conjgrad_epsilon); while (s > starget && conjgrad_loopcount < conjgrad_looplimit) { // Mul(q, A, d); // q = A*d; mul_bfmatrix_lfvector(q, lA, d); filter(q, S); a = s / dot_lfvector(d, q, numverts); // X = X + d*a; add_lfvector_lfvectorS(ldV, ldV, d, a, numverts); // r = r - q*a; sub_lfvector_lfvectorS(r, r, q, a, numverts); s_prev = s; s = dot_lfvector(r, r, numverts); //d = r+d*(s/s_prev); add_lfvector_lfvectorS(d, r, d, (s / s_prev), numverts); filter(d, S); conjgrad_loopcount++; } /* conjgrad_lasterror = s; */ /* UNUSED */ del_lfvector(q); del_lfvector(d); del_lfvector(tmp); del_lfvector(r); // printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount); return conjgrad_loopcount < conjgrad_looplimit; // true means we reached desired accuracy in given time - ie stable } # endif static int cg_filtered(lfVector *ldV, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S, ImplicitSolverResult *result) { // Solves for unknown X in equation AX=B unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100; float conjgrad_epsilon = 0.01f; unsigned int numverts = lA[0].vcount; lfVector *fB = create_lfvector(numverts); lfVector *AdV = create_lfvector(numverts); lfVector *r = create_lfvector(numverts); lfVector *c = create_lfvector(numverts); lfVector *q = create_lfvector(numverts); lfVector *s = create_lfvector(numverts); float bnorm2, delta_new, delta_old, delta_target, alpha; cp_lfvector(ldV, z, numverts); /* d0 = filter(B)^T * P * filter(B) */ cp_lfvector(fB, lB, numverts); filter(fB, S); bnorm2 = dot_lfvector(fB, fB, numverts); delta_target = conjgrad_epsilon * conjgrad_epsilon * bnorm2; /* r = filter(B - A * dV) */ mul_bfmatrix_lfvector(AdV, lA, ldV); sub_lfvector_lfvector(r, lB, AdV, numverts); filter(r, S); /* c = filter(P^-1 * r) */ cp_lfvector(c, r, numverts); filter(c, S); /* delta = r^T * c */ delta_new = dot_lfvector(r, c, numverts); # ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT printf("==== A ====\n"); print_bfmatrix(lA); printf("==== z ====\n"); print_lvector(z, numverts); printf("==== B ====\n"); print_lvector(lB, numverts); printf("==== S ====\n"); print_bfmatrix(S); # endif while (delta_new > 
delta_target && conjgrad_loopcount < conjgrad_looplimit) { mul_bfmatrix_lfvector(q, lA, c); filter(q, S); alpha = delta_new / dot_lfvector(c, q, numverts); add_lfvector_lfvectorS(ldV, ldV, c, alpha, numverts); add_lfvector_lfvectorS(r, r, q, -alpha, numverts); /* s = P^-1 * r */ cp_lfvector(s, r, numverts); delta_old = delta_new; delta_new = dot_lfvector(r, s, numverts); add_lfvector_lfvectorS(c, s, c, delta_new / delta_old, numverts); filter(c, S); conjgrad_loopcount++; } # ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT printf("==== dV ====\n"); print_lvector(ldV, numverts); printf("========\n"); # endif del_lfvector(fB); del_lfvector(AdV); del_lfvector(r); del_lfvector(c); del_lfvector(q); del_lfvector(s); // printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount); result->status = conjgrad_loopcount < conjgrad_looplimit ? BPH_SOLVER_SUCCESS : BPH_SOLVER_NO_CONVERGENCE; result->iterations = conjgrad_loopcount; result->error = bnorm2 > 0.0f ? sqrtf(delta_new / bnorm2) : 0.0f; return conjgrad_loopcount < conjgrad_looplimit; // true means we reached desired accuracy in given time - ie stable } # if 0 // block diagonalizer DO_INLINE void BuildPPinv(fmatrix3x3 *lA, fmatrix3x3 *P, fmatrix3x3 *Pinv) { unsigned int i = 0; // Take only the diagonal blocks of A // #pragma omp parallel for private(i) if (lA[0].vcount > CLOTH_OPENMP_LIMIT) for (i = 0; i < lA[0].vcount; i++) { // block diagonalizer cp_fmatrix(P[i].m, lA[i].m); inverse_fmatrix(Pinv[i].m, P[i].m); } } # if 0 // version 1.3 static int cg_filtered_pre(lfVector *dv, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S, fmatrix3x3 *P, fmatrix3x3 *Pinv) { unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100; float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0; float conjgrad_epsilon = 0.0001; // 0.2 is dt for steps=5 lfVector *r = create_lfvector(numverts); lfVector *p = create_lfvector(numverts); lfVector *s = create_lfvector(numverts); lfVector *h = create_lfvector(numverts); BuildPPinv(lA, P, Pinv); filter(dv, S); add_lfvector_lfvector(dv, dv, z, numverts); mul_bfmatrix_lfvector(r, lA, dv); sub_lfvector_lfvector(r, lB, r, numverts); filter(r, S); mul_prevfmatrix_lfvector(p, Pinv, r); filter(p, S); deltaNew = dot_lfvector(r, p, numverts); delta0 = deltaNew * sqrt(conjgrad_epsilon); # ifdef DEBUG_TIME double start = PIL_check_seconds_timer(); # endif while ((deltaNew > delta0) && (iterations < conjgrad_looplimit)) { iterations++; mul_bfmatrix_lfvector(s, lA, p); filter(s, S); alpha = deltaNew / dot_lfvector(p, s, numverts); add_lfvector_lfvectorS(dv, dv, p, alpha, numverts); add_lfvector_lfvectorS(r, r, s, -alpha, numverts); mul_prevfmatrix_lfvector(h, Pinv, r); filter(h, S); deltaOld = deltaNew; deltaNew = dot_lfvector(r, h, numverts); add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts); filter(p, S); } # ifdef DEBUG_TIME double end = PIL_check_seconds_timer(); printf("cg_filtered_pre time: %f\n", (float)(end - start)); # endif del_lfvector(h); del_lfvector(s); del_lfvector(p); del_lfvector(r); printf("iterations: %d\n", iterations); return iterations < conjgrad_looplimit; } # endif // version 1.4 static int cg_filtered_pre(lfVector *dv, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S, fmatrix3x3 *P, fmatrix3x3 *Pinv, fmatrix3x3 *bigI) { unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100; float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0, tol = 0; lfVector *r = create_lfvector(numverts); lfVector *p = create_lfvector(numverts); lfVector *s = 
create_lfvector(numverts); lfVector *h = create_lfvector(numverts); lfVector *bhat = create_lfvector(numverts); lfVector *btemp = create_lfvector(numverts); BuildPPinv(lA, P, Pinv); initdiag_bfmatrix(bigI, I); sub_bfmatrix_Smatrix(bigI, bigI, S); // x = Sx_0+(I-S)z filter(dv, S); add_lfvector_lfvector(dv, dv, z, numverts); // b_hat = S(b-A(I-S)z) mul_bfmatrix_lfvector(r, lA, z); mul_bfmatrix_lfvector(bhat, bigI, r); sub_lfvector_lfvector(bhat, lB, bhat, numverts); // r = S(b-Ax) mul_bfmatrix_lfvector(r, lA, dv); sub_lfvector_lfvector(r, lB, r, numverts); filter(r, S); // p = SP^-1r mul_prevfmatrix_lfvector(p, Pinv, r); filter(p, S); // delta0 = bhat^TP^-1bhat mul_prevfmatrix_lfvector(btemp, Pinv, bhat); delta0 = dot_lfvector(bhat, btemp, numverts); // deltaNew = r^TP deltaNew = dot_lfvector(r, p, numverts); # if 0 filter(dv, S); add_lfvector_lfvector(dv, dv, z, numverts); mul_bfmatrix_lfvector(r, lA, dv); sub_lfvector_lfvector(r, lB, r, numverts); filter(r, S); mul_prevfmatrix_lfvector(p, Pinv, r); filter(p, S); deltaNew = dot_lfvector(r, p, numverts); delta0 = deltaNew * sqrt(conjgrad_epsilon); # endif # ifdef DEBUG_TIME double start = PIL_check_seconds_timer(); # endif tol = (0.01 * 0.2); while ((deltaNew > delta0 * tol * tol) && (iterations < conjgrad_looplimit)) { iterations++; mul_bfmatrix_lfvector(s, lA, p); filter(s, S); alpha = deltaNew / dot_lfvector(p, s, numverts); add_lfvector_lfvectorS(dv, dv, p, alpha, numverts); add_lfvector_lfvectorS(r, r, s, -alpha, numverts); mul_prevfmatrix_lfvector(h, Pinv, r); filter(h, S); deltaOld = deltaNew; deltaNew = dot_lfvector(r, h, numverts); add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts); filter(p, S); } # ifdef DEBUG_TIME double end = PIL_check_seconds_timer(); printf("cg_filtered_pre time: %f\n", (float)(end - start)); # endif del_lfvector(btemp); del_lfvector(bhat); del_lfvector(h); del_lfvector(s); del_lfvector(p); del_lfvector(r); // printf("iterations: %d\n", iterations); return iterations < conjgrad_looplimit; } # endif bool BPH_mass_spring_solve_velocities(Implicit_Data *data, float dt, ImplicitSolverResult *result) { unsigned int numverts = data->dFdV[0].vcount; lfVector *dFdXmV = create_lfvector(numverts); zero_lfvector(data->dV, numverts); cp_bfmatrix(data->A, data->M); subadd_bfmatrixS_bfmatrixS(data->A, data->dFdV, dt, data->dFdX, (dt * dt)); mul_bfmatrix_lfvector(dFdXmV, data->dFdX, data->V); add_lfvectorS_lfvectorS(data->B, data->F, dt, dFdXmV, (dt * dt), numverts); # ifdef DEBUG_TIME double start = PIL_check_seconds_timer(); # endif /* Conjugate gradient algorithm to solve Ax=b. 
*/ cg_filtered(data->dV, data->A, data->B, data->z, data->S, result); // cg_filtered_pre(id->dV, id->A, id->B, id->z, id->S, id->P, id->Pinv, id->bigI); # ifdef DEBUG_TIME double end = PIL_check_seconds_timer(); printf("cg_filtered calc time: %f\n", (float)(end - start)); # endif // advance velocities add_lfvector_lfvector(data->Vnew, data->V, data->dV, numverts); del_lfvector(dFdXmV); return result->status == BPH_SOLVER_SUCCESS; } bool BPH_mass_spring_solve_positions(Implicit_Data *data, float dt) { int numverts = data->M[0].vcount; // advance positions add_lfvector_lfvectorS(data->Xnew, data->X, data->Vnew, dt, numverts); return true; } void BPH_mass_spring_apply_result(Implicit_Data *data) { int numverts = data->M[0].vcount; cp_lfvector(data->X, data->Xnew, numverts); cp_lfvector(data->V, data->Vnew, numverts); } void BPH_mass_spring_set_vertex_mass(Implicit_Data *data, int index, float mass) { unit_m3(data->M[index].m); mul_m3_fl(data->M[index].m, mass); } void BPH_mass_spring_set_rest_transform(Implicit_Data *data, int index, float tfm[3][3]) { # ifdef CLOTH_ROOT_FRAME copy_m3_m3(data->tfm[index].m, tfm); # else unit_m3(data->tfm[index].m); (void)tfm; # endif } void BPH_mass_spring_set_motion_state(Implicit_Data *data, int index, const float x[3], const float v[3]) { world_to_root_v3(data, index, data->X[index], x); world_to_root_v3(data, index, data->V[index], v); } void BPH_mass_spring_set_position(Implicit_Data *data, int index, const float x[3]) { world_to_root_v3(data, index, data->X[index], x); } void BPH_mass_spring_set_velocity(Implicit_Data *data, int index, const float v[3]) { world_to_root_v3(data, index, data->V[index], v); } void BPH_mass_spring_get_motion_state(struct Implicit_Data *data, int index, float x[3], float v[3]) { if (x) { root_to_world_v3(data, index, x, data->X[index]); } if (v) { root_to_world_v3(data, index, v, data->V[index]); } } void BPH_mass_spring_get_position(struct Implicit_Data *data, int index, float x[3]) { root_to_world_v3(data, index, x, data->X[index]); } void BPH_mass_spring_get_velocity(struct Implicit_Data *data, int index, float v[3]) { root_to_world_v3(data, index, v, data->V[index]); } void BPH_mass_spring_get_new_position(struct Implicit_Data *data, int index, float x[3]) { root_to_world_v3(data, index, x, data->Xnew[index]); } void BPH_mass_spring_set_new_position(struct Implicit_Data *data, int index, const float x[3]) { world_to_root_v3(data, index, data->Xnew[index], x); } void BPH_mass_spring_get_new_velocity(struct Implicit_Data *data, int index, float v[3]) { root_to_world_v3(data, index, v, data->Vnew[index]); } void BPH_mass_spring_set_new_velocity(struct Implicit_Data *data, int index, const float v[3]) { world_to_root_v3(data, index, data->Vnew[index], v); } /* -------------------------------- */ static int BPH_mass_spring_add_block(Implicit_Data *data, int v1, int v2) { int s = data->M[0].vcount + data->num_blocks; /* index from array start */ BLI_assert(s < data->M[0].vcount + data->M[0].scount); ++data->num_blocks; /* tfm and S don't have spring entries (diagonal blocks only) */ init_fmatrix(data->bigI + s, v1, v2); init_fmatrix(data->M + s, v1, v2); init_fmatrix(data->dFdX + s, v1, v2); init_fmatrix(data->dFdV + s, v1, v2); init_fmatrix(data->A + s, v1, v2); init_fmatrix(data->P + s, v1, v2); init_fmatrix(data->Pinv + s, v1, v2); return s; } void BPH_mass_spring_clear_constraints(Implicit_Data *data) { int i, numverts = data->S[0].vcount; for (i = 0; i < numverts; i++) { unit_m3(data->S[i].m); zero_v3(data->z[i]); } } 
void BPH_mass_spring_add_constraint_ndof0(Implicit_Data *data, int index, const float dV[3]) { zero_m3(data->S[index].m); world_to_root_v3(data, index, data->z[index], dV); } void BPH_mass_spring_add_constraint_ndof1( Implicit_Data *data, int index, const float c1[3], const float c2[3], const float dV[3]) { float m[3][3], p[3], q[3], u[3], cmat[3][3]; world_to_root_v3(data, index, p, c1); mul_fvectorT_fvector(cmat, p, p); sub_m3_m3m3(m, I, cmat); world_to_root_v3(data, index, q, c2); mul_fvectorT_fvector(cmat, q, q); sub_m3_m3m3(m, m, cmat); /* XXX not sure but multiplication should work here */ copy_m3_m3(data->S[index].m, m); // mul_m3_m3m3(data->S[index].m, data->S[index].m, m); world_to_root_v3(data, index, u, dV); add_v3_v3(data->z[index], u); } void BPH_mass_spring_add_constraint_ndof2(Implicit_Data *data, int index, const float c1[3], const float dV[3]) { float m[3][3], p[3], u[3], cmat[3][3]; world_to_root_v3(data, index, p, c1); mul_fvectorT_fvector(cmat, p, p); sub_m3_m3m3(m, I, cmat); copy_m3_m3(data->S[index].m, m); // mul_m3_m3m3(data->S[index].m, data->S[index].m, m); world_to_root_v3(data, index, u, dV); add_v3_v3(data->z[index], u); } void BPH_mass_spring_clear_forces(Implicit_Data *data) { int numverts = data->M[0].vcount; zero_lfvector(data->F, numverts); init_bfmatrix(data->dFdX, ZERO); init_bfmatrix(data->dFdV, ZERO); data->num_blocks = 0; } void BPH_mass_spring_force_reference_frame(Implicit_Data *data, int index, const float acceleration[3], const float omega[3], const float domega_dt[3], float mass) { # ifdef CLOTH_ROOT_FRAME float acc[3], w[3], dwdt[3]; float f[3], dfdx[3][3], dfdv[3][3]; float euler[3], coriolis[3], centrifugal[3], rotvel[3]; float deuler[3][3], dcoriolis[3][3], dcentrifugal[3][3], drotvel[3][3]; world_to_root_v3(data, index, acc, acceleration); world_to_root_v3(data, index, w, omega); world_to_root_v3(data, index, dwdt, domega_dt); cross_v3_v3v3(euler, dwdt, data->X[index]); cross_v3_v3v3(coriolis, w, data->V[index]); mul_v3_fl(coriolis, 2.0f); cross_v3_v3v3(rotvel, w, data->X[index]); cross_v3_v3v3(centrifugal, w, rotvel); sub_v3_v3v3(f, acc, euler); sub_v3_v3(f, coriolis); sub_v3_v3(f, centrifugal); mul_v3_fl(f, mass); /* F = m * a */ cross_v3_identity(deuler, dwdt); cross_v3_identity(dcoriolis, w); mul_m3_fl(dcoriolis, 2.0f); cross_v3_identity(drotvel, w); cross_m3_v3m3(dcentrifugal, w, drotvel); add_m3_m3m3(dfdx, deuler, dcentrifugal); negate_m3(dfdx); mul_m3_fl(dfdx, mass); copy_m3_m3(dfdv, dcoriolis); negate_m3(dfdv); mul_m3_fl(dfdv, mass); add_v3_v3(data->F[index], f); add_m3_m3m3(data->dFdX[index].m, data->dFdX[index].m, dfdx); add_m3_m3m3(data->dFdV[index].m, data->dFdV[index].m, dfdv); # else (void)data; (void)index; (void)acceleration; (void)omega; (void)domega_dt; # endif } void BPH_mass_spring_force_gravity(Implicit_Data *data, int index, float mass, const float g[3]) { /* force = mass * acceleration (in this case: gravity) */ float f[3]; world_to_root_v3(data, index, f, g); mul_v3_fl(f, mass); add_v3_v3(data->F[index], f); } void BPH_mass_spring_force_drag(Implicit_Data *data, float drag) { int i, numverts = data->M[0].vcount; for (i = 0; i < numverts; i++) { float tmp[3][3]; /* NB: uses root space velocity, no need to transform */ madd_v3_v3fl(data->F[i], data->V[i], -drag); copy_m3_m3(tmp, I); mul_m3_fl(tmp, -drag); add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, tmp); } } void BPH_mass_spring_force_extern( struct Implicit_Data *data, int i, const float f[3], float dfdx[3][3], float dfdv[3][3]) { float tf[3], tdfdx[3][3], 
tdfdv[3][3]; world_to_root_v3(data, i, tf, f); world_to_root_m3(data, i, tdfdx, dfdx); world_to_root_m3(data, i, tdfdv, dfdv); add_v3_v3(data->F[i], tf); add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, tdfdx); add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, tdfdv); } static float calc_nor_area_tri(float nor[3], const float v1[3], const float v2[3], const float v3[3]) { float n1[3], n2[3]; sub_v3_v3v3(n1, v1, v2); sub_v3_v3v3(n2, v2, v3); cross_v3_v3v3(nor, n1, n2); return normalize_v3(nor) / 2.0f; } /* XXX does not support force jacobians yet, since the effector system does not provide them either */ void BPH_mass_spring_force_face_wind( Implicit_Data *data, int v1, int v2, int v3, const float (*winvec)[3]) { const float effector_scale = 0.02f; float win[3], nor[3], area; float factor; /* calculate face normal and area */ area = calc_nor_area_tri(nor, data->X[v1], data->X[v2], data->X[v3]); /* The force is calculated and split up evenly for each of the three face verts */ factor = effector_scale * area / 3.0f; world_to_root_v3(data, v1, win, winvec[v1]); madd_v3_v3fl(data->F[v1], nor, factor * dot_v3v3(win, nor)); world_to_root_v3(data, v2, win, winvec[v2]); madd_v3_v3fl(data->F[v2], nor, factor * dot_v3v3(win, nor)); world_to_root_v3(data, v3, win, winvec[v3]); madd_v3_v3fl(data->F[v3], nor, factor * dot_v3v3(win, nor)); } float BPH_tri_tetra_volume_signed_6x(Implicit_Data *data, int v1, int v2, int v3) { /* The result will be 6x the volume */ return volume_tri_tetrahedron_signed_v3_6x(data->X[v1], data->X[v2], data->X[v3]); } float BPH_tri_area(struct Implicit_Data *data, int v1, int v2, int v3) { float nor[3]; return calc_nor_area_tri(nor, data->X[v1], data->X[v2], data->X[v3]); } void BPH_mass_spring_force_pressure(Implicit_Data *data, int v1, int v2, int v3, float common_pressure, const float *vertex_pressure, const float weights[3]) { float nor[3], area; float factor, base_force; float force[3]; /* calculate face normal and area */ area = calc_nor_area_tri(nor, data->X[v1], data->X[v2], data->X[v3]); /* The force is calculated and split up evenly for each of the three face verts */ factor = area / 3.0f; base_force = common_pressure * factor; /* Compute per-vertex force values from local pressures. * From integrating the pressure over the triangle and deriving * equivalent vertex forces, it follows that: * * force[idx] = (sum(pressure) + pressure[idx]) * area / 12 * * Effectively, 1/4 of the pressure acts just on its vertex, * while 3/4 is split evenly over all three. 
*/ if (vertex_pressure) { copy_v3_fl3(force, vertex_pressure[v1], vertex_pressure[v2], vertex_pressure[v3]); mul_v3_fl(force, factor / 4.0f); base_force += force[0] + force[1] + force[2]; } else { zero_v3(force); } /* add pressure to each of the face verts */ madd_v3_v3fl(data->F[v1], nor, (base_force + force[0]) * weights[0]); madd_v3_v3fl(data->F[v2], nor, (base_force + force[1]) * weights[1]); madd_v3_v3fl(data->F[v3], nor, (base_force + force[2]) * weights[2]); } static void edge_wind_vertex(const float dir[3], float length, float radius, const float wind[3], float f[3], float UNUSED(dfdx[3][3]), float UNUSED(dfdv[3][3])) { const float density = 0.01f; /* XXX arbitrary value, corresponds to effect of air density */ float cos_alpha, sin_alpha, cross_section; float windlen = len_v3(wind); if (windlen == 0.0f) { zero_v3(f); return; } /* angle of wind direction to edge */ cos_alpha = dot_v3v3(wind, dir) / windlen; sin_alpha = sqrtf(1.0f - cos_alpha * cos_alpha); cross_section = radius * ((float)M_PI * radius * sin_alpha + length * cos_alpha); mul_v3_v3fl(f, wind, density * cross_section); } void BPH_mass_spring_force_edge_wind( Implicit_Data *data, int v1, int v2, float radius1, float radius2, const float (*winvec)[3]) { float win[3], dir[3], length; float f[3], dfdx[3][3], dfdv[3][3]; sub_v3_v3v3(dir, data->X[v1], data->X[v2]); length = normalize_v3(dir); world_to_root_v3(data, v1, win, winvec[v1]); edge_wind_vertex(dir, length, radius1, win, f, dfdx, dfdv); add_v3_v3(data->F[v1], f); world_to_root_v3(data, v2, win, winvec[v2]); edge_wind_vertex(dir, length, radius2, win, f, dfdx, dfdv); add_v3_v3(data->F[v2], f); } void BPH_mass_spring_force_vertex_wind(Implicit_Data *data, int v, float UNUSED(radius), const float (*winvec)[3]) { const float density = 0.01f; /* XXX arbitrary value, corresponds to effect of air density */ float wind[3]; float f[3]; world_to_root_v3(data, v, wind, winvec[v]); mul_v3_v3fl(f, wind, density); add_v3_v3(data->F[v], f); } BLI_INLINE void dfdx_spring(float to[3][3], const float dir[3], float length, float L, float k) { // dir is unit length direction, rest is spring's restlength, k is spring constant. // return ( (I-outerprod(dir, dir))*Min(1.0f, rest/length) - I) * -k; outerproduct(to, dir, dir); sub_m3_m3m3(to, I, to); mul_m3_fl(to, (L / length)); sub_m3_m3m3(to, to, I); mul_m3_fl(to, k); } /* unused */ # if 0 BLI_INLINE void dfdx_damp(float to[3][3], const float dir[3], float length, const float vel[3], float rest, float damping) { // inner spring damping vel is the relative velocity of the endpoints. 
// return (I-outerprod(dir, dir)) * (-damping * -(dot(dir, vel)/Max(length, rest))); mul_fvectorT_fvector(to, dir, dir); sub_fmatrix_fmatrix(to, I, to); mul_fmatrix_S(to, (-damping * -(dot_v3v3(dir, vel) / MAX2(length, rest)))); } # endif BLI_INLINE void dfdv_damp(float to[3][3], const float dir[3], float damping) { // derivative of force wrt velocity outerproduct(to, dir, dir); mul_m3_fl(to, -damping); } BLI_INLINE float fb(float length, float L) { float x = length / L; float xx = x * x; float xxx = xx * x; float xxxx = xxx * x; return (-11.541f * xxxx + 34.193f * xxx - 39.083f * xx + 23.116f * x - 9.713f); } BLI_INLINE float fbderiv(float length, float L) { float x = length / L; float xx = x * x; float xxx = xx * x; return (-46.164f * xxx + 102.579f * xx - 78.166f * x + 23.116f); } BLI_INLINE float fbstar(float length, float L, float kb, float cb) { float tempfb_fl = kb * fb(length, L); float fbstar_fl = cb * (length - L); if (tempfb_fl < fbstar_fl) { return fbstar_fl; } else { return tempfb_fl; } } // function to calculae bending spring force (taken from Choi & Co) BLI_INLINE float fbstar_jacobi(float length, float L, float kb, float cb) { float tempfb_fl = kb * fb(length, L); float fbstar_fl = cb * (length - L); if (tempfb_fl < fbstar_fl) { return -cb; } else { return -kb * fbderiv(length, L); } } /* calculate elonglation */ BLI_INLINE bool spring_length(Implicit_Data *data, int i, int j, float r_extent[3], float r_dir[3], float *r_length, float r_vel[3]) { sub_v3_v3v3(r_extent, data->X[j], data->X[i]); sub_v3_v3v3(r_vel, data->V[j], data->V[i]); *r_length = len_v3(r_extent); if (*r_length > ALMOST_ZERO) { # if 0 if (length > L) { if ((clmd->sim_parms->flags & CSIMSETT_FLAG_TEARING_ENABLED) && (((length - L) * 100.0f / L) > clmd->sim_parms->maxspringlen)) { // cut spring! s->flags |= CSPRING_FLAG_DEACTIVATE; return false; } } # endif mul_v3_v3fl(r_dir, r_extent, 1.0f / (*r_length)); } else { zero_v3(r_dir); } return true; } BLI_INLINE void apply_spring( Implicit_Data *data, int i, int j, const float f[3], float dfdx[3][3], float dfdv[3][3]) { int block_ij = BPH_mass_spring_add_block(data, i, j); add_v3_v3(data->F[i], f); sub_v3_v3(data->F[j], f); add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx); add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfdx); sub_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfdx); add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv); add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfdv); sub_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfdv); } bool BPH_mass_spring_force_spring_linear(Implicit_Data *data, int i, int j, float restlen, float stiffness_tension, float damping_tension, float stiffness_compression, float damping_compression, bool resist_compress, bool new_compress, float clamp_force) { float extent[3], length, dir[3], vel[3]; float f[3], dfdx[3][3], dfdv[3][3]; float damping = 0; // calculate elonglation spring_length(data, i, j, extent, dir, &length, vel); /* This code computes not only the force, but also its derivative. * Zero derivative effectively disables the spring for the implicit solver. * Thus length > restlen makes cloth unconstrained at the start of simulation. 
*/ if ((length >= restlen && length > 0) || resist_compress) { float stretch_force; damping = damping_tension; stretch_force = stiffness_tension * (length - restlen); if (clamp_force > 0.0f && stretch_force > clamp_force) { stretch_force = clamp_force; } mul_v3_v3fl(f, dir, stretch_force); dfdx_spring(dfdx, dir, length, restlen, stiffness_tension); } else if (new_compress) { /* This is based on the Choi and Ko bending model, * which works surprisingly well for compression. */ float kb = stiffness_compression; float cb = kb; /* cb equal to kb seems to work, but a factor can be added if necessary */ damping = damping_compression; mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb)); outerproduct(dfdx, dir, dir); mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb)); } else { return false; } madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir)); dfdv_damp(dfdv, dir, damping); apply_spring(data, i, j, f, dfdx, dfdv); return true; } /* See "Stable but Responsive Cloth" (Choi, Ko 2005) */ bool BPH_mass_spring_force_spring_bending( Implicit_Data *data, int i, int j, float restlen, float kb, float cb) { float extent[3], length, dir[3], vel[3]; // calculate elonglation spring_length(data, i, j, extent, dir, &length, vel); if (length < restlen) { float f[3], dfdx[3][3], dfdv[3][3]; mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb)); outerproduct(dfdx, dir, dir); mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb)); /* XXX damping not supported */ zero_m3(dfdv); apply_spring(data, i, j, f, dfdx, dfdv); return true; } else { return false; } } BLI_INLINE void poly_avg(lfVector *data, int *inds, int len, float r_avg[3]) { float fact = 1.0f / (float)len; zero_v3(r_avg); for (int i = 0; i < len; i++) { madd_v3_v3fl(r_avg, data[inds[i]], fact); } } BLI_INLINE void poly_norm(lfVector *data, int i, int j, int *inds, int len, float r_dir[3]) { float mid[3]; poly_avg(data, inds, len, mid); normal_tri_v3(r_dir, data[i], data[j], mid); } BLI_INLINE void edge_avg(lfVector *data, int i, int j, float r_avg[3]) { r_avg[0] = (data[i][0] + data[j][0]) * 0.5f; r_avg[1] = (data[i][1] + data[j][1]) * 0.5f; r_avg[2] = (data[i][2] + data[j][2]) * 0.5f; } BLI_INLINE void edge_norm(lfVector *data, int i, int j, float r_dir[3]) { sub_v3_v3v3(r_dir, data[i], data[j]); normalize_v3(r_dir); } BLI_INLINE float bend_angle(float dir_a[3], float dir_b[3], float dir_e[3]) { float cos, sin; float tmp[3]; cos = dot_v3v3(dir_a, dir_b); cross_v3_v3v3(tmp, dir_a, dir_b); sin = dot_v3v3(tmp, dir_e); return atan2f(sin, cos); } BLI_INLINE void spring_angle(Implicit_Data *data, int i, int j, int *i_a, int *i_b, int len_a, int len_b, float r_dir_a[3], float r_dir_b[3], float *r_angle, float r_vel_a[3], float r_vel_b[3]) { float dir_e[3], vel_e[3]; poly_norm(data->X, j, i, i_a, len_a, r_dir_a); poly_norm(data->X, i, j, i_b, len_b, r_dir_b); edge_norm(data->X, i, j, dir_e); *r_angle = bend_angle(r_dir_a, r_dir_b, dir_e); poly_avg(data->V, i_a, len_a, r_vel_a); poly_avg(data->V, i_b, len_b, r_vel_b); edge_avg(data->V, i, j, vel_e); sub_v3_v3(r_vel_a, vel_e); sub_v3_v3(r_vel_b, vel_e); } /* Angular springs roughly based on the bending model proposed by Baraff and Witkin in "Large Steps * in Cloth Simulation". 
*/ bool BPH_mass_spring_force_spring_angular(Implicit_Data *data, int i, int j, int *i_a, int *i_b, int len_a, int len_b, float restang, float stiffness, float damping) { float angle, dir_a[3], dir_b[3], vel_a[3], vel_b[3]; float f_a[3], f_b[3], f_e[3]; float force; int x; spring_angle(data, i, j, i_a, i_b, len_a, len_b, dir_a, dir_b, &angle, vel_a, vel_b); /* spring force */ force = stiffness * (angle - restang); /* damping force */ force += -damping * (dot_v3v3(vel_a, dir_a) + dot_v3v3(vel_b, dir_b)); mul_v3_v3fl(f_a, dir_a, force / len_a); mul_v3_v3fl(f_b, dir_b, force / len_b); for (x = 0; x < len_a; x++) { add_v3_v3(data->F[i_a[x]], f_a); } for (x = 0; x < len_b; x++) { add_v3_v3(data->F[i_b[x]], f_b); } mul_v3_v3fl(f_a, dir_a, force * 0.5f); mul_v3_v3fl(f_b, dir_b, force * 0.5f); add_v3_v3v3(f_e, f_a, f_b); sub_v3_v3(data->F[i], f_e); sub_v3_v3(data->F[j], f_e); return true; } /* Jacobian of a direction vector. * Basically the part of the differential orthogonal to the direction, * inversely proportional to the length of the edge. * * dD_ij/dx_i = -dD_ij/dx_j = (D_ij * D_ij^T - I) / len_ij */ BLI_INLINE void spring_grad_dir( Implicit_Data *data, int i, int j, float edge[3], float dir[3], float grad_dir[3][3]) { float length; sub_v3_v3v3(edge, data->X[j], data->X[i]); length = normalize_v3_v3(dir, edge); if (length > ALMOST_ZERO) { outerproduct(grad_dir, dir, dir); sub_m3_m3m3(grad_dir, I, grad_dir); mul_m3_fl(grad_dir, 1.0f / length); } else { zero_m3(grad_dir); } } BLI_INLINE void spring_hairbend_forces(Implicit_Data *data, int i, int j, int k, const float goal[3], float stiffness, float damping, int q, const float dx[3], const float dv[3], float r_f[3]) { float edge_ij[3], dir_ij[3]; float edge_jk[3], dir_jk[3]; float vel_ij[3], vel_jk[3], vel_ortho[3]; float f_bend[3], f_damp[3]; float fk[3]; float dist[3]; zero_v3(fk); sub_v3_v3v3(edge_ij, data->X[j], data->X[i]); if (q == i) { sub_v3_v3(edge_ij, dx); } if (q == j) { add_v3_v3(edge_ij, dx); } normalize_v3_v3(dir_ij, edge_ij); sub_v3_v3v3(edge_jk, data->X[k], data->X[j]); if (q == j) { sub_v3_v3(edge_jk, dx); } if (q == k) { add_v3_v3(edge_jk, dx); } normalize_v3_v3(dir_jk, edge_jk); sub_v3_v3v3(vel_ij, data->V[j], data->V[i]); if (q == i) { sub_v3_v3(vel_ij, dv); } if (q == j) { add_v3_v3(vel_ij, dv); } sub_v3_v3v3(vel_jk, data->V[k], data->V[j]); if (q == j) { sub_v3_v3(vel_jk, dv); } if (q == k) { add_v3_v3(vel_jk, dv); } /* bending force */ sub_v3_v3v3(dist, goal, edge_jk); mul_v3_v3fl(f_bend, dist, stiffness); add_v3_v3(fk, f_bend); /* damping force */ madd_v3_v3v3fl(vel_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk)); mul_v3_v3fl(f_damp, vel_ortho, damping); sub_v3_v3(fk, f_damp); copy_v3_v3(r_f, fk); } /* Finite Differences method for estimating the jacobian of the force */ BLI_INLINE void spring_hairbend_estimate_dfdx(Implicit_Data *data, int i, int j, int k, const float goal[3], float stiffness, float damping, int q, float dfdx[3][3]) { const float delta = 0.00001f; // TODO find a good heuristic for this float dvec_null[3][3], dvec_pos[3][3], dvec_neg[3][3]; float f[3]; int a, b; zero_m3(dvec_null); unit_m3(dvec_pos); mul_m3_fl(dvec_pos, delta * 0.5f); copy_m3_m3(dvec_neg, dvec_pos); negate_m3(dvec_neg); /* XXX TODO offset targets to account for position dependency */ for (a = 0; a < 3; a++) { spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_pos[a], dvec_null[a], f); copy_v3_v3(dfdx[a], f); spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_neg[a], dvec_null[a], f); 
sub_v3_v3(dfdx[a], f); for (b = 0; b < 3; b++) { dfdx[a][b] /= delta; } } } /* Finite Differences method for estimating the jacobian of the force */ BLI_INLINE void spring_hairbend_estimate_dfdv(Implicit_Data *data, int i, int j, int k, const float goal[3], float stiffness, float damping, int q, float dfdv[3][3]) { const float delta = 0.00001f; // TODO find a good heuristic for this float dvec_null[3][3], dvec_pos[3][3], dvec_neg[3][3]; float f[3]; int a, b; zero_m3(dvec_null); unit_m3(dvec_pos); mul_m3_fl(dvec_pos, delta * 0.5f); copy_m3_m3(dvec_neg, dvec_pos); negate_m3(dvec_neg); /* XXX TODO offset targets to account for position dependency */ for (a = 0; a < 3; a++) { spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_pos[a], f); copy_v3_v3(dfdv[a], f); spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_neg[a], f); sub_v3_v3(dfdv[a], f); for (b = 0; b < 3; b++) { dfdv[a][b] /= delta; } } } /* Angular spring that pulls the vertex toward the local target * See "Artistic Simulation of Curly Hair" (Pixar technical memo #12-03a) */ bool BPH_mass_spring_force_spring_bending_hair(Implicit_Data *data, int i, int j, int k, const float target[3], float stiffness, float damping) { float goal[3]; float fj[3], fk[3]; float dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3]; float dfj_dvi[3][3], dfj_dvj[3][3], dfk_dvi[3][3], dfk_dvj[3][3], dfk_dvk[3][3]; const float vecnull[3] = {0.0f, 0.0f, 0.0f}; int block_ij = BPH_mass_spring_add_block(data, i, j); int block_jk = BPH_mass_spring_add_block(data, j, k); int block_ik = BPH_mass_spring_add_block(data, i, k); world_to_root_v3(data, j, goal, target); spring_hairbend_forces(data, i, j, k, goal, stiffness, damping, k, vecnull, vecnull, fk); negate_v3_v3(fj, fk); /* counterforce */ spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, i, dfk_dxi); spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, j, dfk_dxj); spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, k, dfk_dxk); copy_m3_m3(dfj_dxi, dfk_dxi); negate_m3(dfj_dxi); copy_m3_m3(dfj_dxj, dfk_dxj); negate_m3(dfj_dxj); spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, i, dfk_dvi); spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, j, dfk_dvj); spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, k, dfk_dvk); copy_m3_m3(dfj_dvi, dfk_dvi); negate_m3(dfj_dvi); copy_m3_m3(dfj_dvj, dfk_dvj); negate_m3(dfj_dvj); /* add forces and jacobians to the solver data */ add_v3_v3(data->F[j], fj); add_v3_v3(data->F[k], fk); add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj); add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk); add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi); add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj); add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi); add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfj_dvj); add_m3_m3m3(data->dFdV[k].m, data->dFdV[k].m, dfk_dvk); add_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfj_dvi); add_m3_m3m3(data->dFdV[block_jk].m, data->dFdV[block_jk].m, dfk_dvj); add_m3_m3m3(data->dFdV[block_ik].m, data->dFdV[block_ik].m, dfk_dvi); /* XXX analytical calculation of derivatives below is incorrect. * This proved to be difficult, but for now just using the finite difference method for * estimating the jacobians should be sufficient. 
*/ # if 0 float edge_ij[3], dir_ij[3], grad_dir_ij[3][3]; float edge_jk[3], dir_jk[3], grad_dir_jk[3][3]; float dist[3], vel_jk[3], vel_jk_ortho[3], projvel[3]; float target[3]; float tmp[3][3]; float fi[3], fj[3], fk[3]; float dfi_dxi[3][3], dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3]; float dfdvi[3][3]; // TESTING damping = 0.0f; zero_v3(fi); zero_v3(fj); zero_v3(fk); zero_m3(dfi_dxi); zero_m3(dfj_dxi); zero_m3(dfk_dxi); zero_m3(dfk_dxj); zero_m3(dfk_dxk); /* jacobian of direction vectors */ spring_grad_dir(data, i, j, edge_ij, dir_ij, grad_dir_ij); spring_grad_dir(data, j, k, edge_jk, dir_jk, grad_dir_jk); sub_v3_v3v3(vel_jk, data->V[k], data->V[j]); /* bending force */ mul_v3_v3fl(target, dir_ij, restlen); sub_v3_v3v3(dist, target, edge_jk); mul_v3_v3fl(fk, dist, stiffness); /* damping force */ madd_v3_v3v3fl(vel_jk_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk)); madd_v3_v3fl(fk, vel_jk_ortho, damping); /* XXX this only holds true as long as we assume straight rest shape! * eventually will become a bit more involved since the opposite segment * gets its own target, under condition of having equal torque on both sides. */ copy_v3_v3(fi, fk); /* counterforce on the middle point */ sub_v3_v3(fj, fi); sub_v3_v3(fj, fk); /* === derivatives === */ madd_m3_m3fl(dfk_dxi, grad_dir_ij, stiffness * restlen); madd_m3_m3fl(dfk_dxj, grad_dir_ij, -stiffness * restlen); madd_m3_m3fl(dfk_dxj, I, stiffness); madd_m3_m3fl(dfk_dxk, I, -stiffness); copy_m3_m3(dfi_dxi, dfk_dxk); negate_m3(dfi_dxi); /* dfj_dfi == dfi_dfj due to symmetry, * dfi_dfj == dfk_dfj due to fi == fk * XXX see comment above on future bent rest shapes */ copy_m3_m3(dfj_dxi, dfk_dxj); /* dfj_dxj == -(dfi_dxj + dfk_dxj) due to fj == -(fi + fk) */ sub_m3_m3m3(dfj_dxj, dfj_dxj, dfj_dxi); sub_m3_m3m3(dfj_dxj, dfj_dxj, dfk_dxj); /* add forces and jacobians to the solver data */ add_v3_v3(data->F[i], fi); add_v3_v3(data->F[j], fj); add_v3_v3(data->F[k], fk); add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfi_dxi); add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj); add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk); add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi); add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj); add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi); # endif return true; } bool BPH_mass_spring_force_spring_goal(Implicit_Data *data, int i, const float goal_x[3], const float goal_v[3], float stiffness, float damping) { float root_goal_x[3], root_goal_v[3], extent[3], length, dir[3], vel[3]; float f[3], dfdx[3][3], dfdv[3][3]; /* goal is in world space */ world_to_root_v3(data, i, root_goal_x, goal_x); world_to_root_v3(data, i, root_goal_v, goal_v); sub_v3_v3v3(extent, root_goal_x, data->X[i]); sub_v3_v3v3(vel, root_goal_v, data->V[i]); length = normalize_v3_v3(dir, extent); if (length > ALMOST_ZERO) { mul_v3_v3fl(f, dir, stiffness * length); // Ascher & Boxman, p.21: Damping only during elonglation // something wrong with it... madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir)); dfdx_spring(dfdx, dir, length, 0.0f, stiffness); dfdv_damp(dfdv, dir, damping); add_v3_v3(data->F[i], f); add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx); add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv); return true; } else { return false; } } #endif /* IMPLICIT_SOLVER_BLENDER */
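Taken together, the BPH_mass_spring_* entry points above form a small per-step API: rebuild forces and jacobians, solve A*dV = B, then advance and apply the new state. Below is a minimal usage sketch, not from the Blender sources; it assumes the ImplicitSolverResult type and these prototypes are visible through the accompanying implicit.h header, and it uses a hypothetical two-vertex, one-spring mesh.

/* Usage sketch only: implicit.h (assumed) provides ImplicitSolverResult and the
 * BPH_mass_spring_* prototypes defined in this file. */
#include <stdbool.h>
#include "implicit.h"

static void step_two_vertex_spring(float dt)
{
  const float gravity[3] = {0.0f, 0.0f, -9.81f};
  float x0[3] = {0.0f, 0.0f, 0.0f}, v0[3] = {0.0f, 0.0f, 0.0f};
  float x1[3] = {0.0f, 0.0f, -1.0f}, v1[3] = {0.0f, 0.0f, 0.0f};
  float unit[3][3] = {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}};
  ImplicitSolverResult result;

  Implicit_Data *id = BPH_mass_spring_solver_create(2, 1);

  /* per-vertex setup: rest transform, mass, initial motion state */
  for (int i = 0; i < 2; i++) {
    BPH_mass_spring_set_rest_transform(id, i, unit);
    BPH_mass_spring_set_vertex_mass(id, i, 1.0f);
  }
  BPH_mass_spring_set_motion_state(id, 0, x0, v0);
  BPH_mass_spring_set_motion_state(id, 1, x1, v1);

  /* one implicit step: reset constraints and forces, accumulate forces and
   * their jacobians, then solve A*dV = B and advance */
  BPH_mass_spring_clear_constraints(id);
  BPH_mass_spring_clear_forces(id);
  BPH_mass_spring_force_gravity(id, 0, 1.0f, gravity);
  BPH_mass_spring_force_gravity(id, 1, 1.0f, gravity);
  BPH_mass_spring_force_spring_linear(
      id, 0, 1, 1.0f, 100.0f, 1.0f, 100.0f, 1.0f, true, false, 0.0f);

  BPH_mass_spring_solve_velocities(id, dt, &result);
  BPH_mass_spring_solve_positions(id, dt);
  BPH_mass_spring_apply_result(id);

  BPH_mass_spring_get_motion_state(id, 1, x1, v1); /* read back vertex 1 */
  BPH_mass_spring_solver_free(id);
}

In Blender itself, cloth.c drives roughly this sequence once per substep, with pinned vertices additionally constrained through BPH_mass_spring_add_constraint_ndof0 before the solve.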
star3d4r.c
#define BENCH_DIM 3 #define BENCH_FPP 49 #define BENCH_RAD 4 #include "common.h" double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE (*A)[dimsize][dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize][dimsize])A1; if (scop) { #pragma scop for (int t = 0; t < timestep; t++) for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++) A[(t+1)%2][i][j][k] = 0.25000f * A[t%2][i][j][k] + 0.03228f * A[t%2][i][j][k-4] + 0.03138f * A[t%2][i][j][k-3] + 0.03118f * A[t%2][i][j][k-2] + 0.03027f * A[t%2][i][j][k-1] + 0.03022f * A[t%2][i][j][k+1] + 0.03112f * A[t%2][i][j][k+2] + 0.03132f * A[t%2][i][j][k+3] + 0.03222f * A[t%2][i][j][k+4] + 0.03026f * A[t%2][i-1][j][k] + 0.03024f * A[t%2][i+1][j][k] + 0.03027f * A[t%2][i][j-1][k] + 0.03023f * A[t%2][i][j+1][k] + 0.03116f * A[t%2][i-2][j][k] + 0.03114f * A[t%2][i+2][j][k] + 0.03117f * A[t%2][i][j-2][k] + 0.03113f * A[t%2][i][j+2][k] + 0.03136f * A[t%2][i-3][j][k] + 0.03134f * A[t%2][i+3][j][k] + 0.03137f * A[t%2][i][j-3][k] + 0.03133f * A[t%2][i][j+3][k] + 0.03226f * A[t%2][i-4][j][k] + 0.03224f * A[t%2][i+4][j][k] + 0.03227f * A[t%2][i][j-4][k] + 0.03223f * A[t%2][i][j+4][k]; #pragma endscop } else { for (int t = 0; t < timestep; t++) #pragma omp parallel for for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++) A[(t+1)%2][i][j][k] = 0.25000f * A[t%2][i][j][k] + 0.03228f * A[t%2][i][j][k-4] + 0.03138f * A[t%2][i][j][k-3] + 0.03118f * A[t%2][i][j][k-2] + 0.03027f * A[t%2][i][j][k-1] + 0.03022f * A[t%2][i][j][k+1] + 0.03112f * A[t%2][i][j][k+2] + 0.03132f * A[t%2][i][j][k+3] + 0.03222f * A[t%2][i][j][k+4] + 0.03026f * A[t%2][i-1][j][k] + 0.03024f * A[t%2][i+1][j][k] + 0.03027f * A[t%2][i][j-1][k] + 0.03023f * A[t%2][i][j+1][k] + 0.03116f * A[t%2][i-2][j][k] + 0.03114f * A[t%2][i+2][j][k] + 0.03117f * A[t%2][i][j-2][k] + 0.03113f * A[t%2][i][j+2][k] + 0.03136f * A[t%2][i-3][j][k] + 0.03134f * A[t%2][i+3][j][k] + 0.03137f * A[t%2][i][j-3][k] + 0.03133f * A[t%2][i][j+3][k] + 0.03226f * A[t%2][i-4][j][k] + 0.03224f * A[t%2][i+4][j][k] + 0.03227f * A[t%2][i][j-4][k] + 0.03223f * A[t%2][i][j+4][k]; } return (((end_time != 0.0) ? end_time : sb_time()) - start_time); }
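kernel_stencil expects a caller-allocated, double-buffered grid of 2 × dimsize³ cells, where dimsize = compsize + 2*BENCH_RAD, and the scop flag selects the pragma-scop-annotated serial loop nest versus the OpenMP-parallel one. A stand-alone driver sketch follows; it assumes SB_TYPE resolves to float (as the f-suffixed coefficients suggest) and that it is linked together with the kernel and the benchmark's common.h harness, which supplies sb_time. The problem size and sweep count are made up for illustration.

#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

#define BENCH_RAD 4            /* assumed: matches the kernel's radius */
typedef float SB_TYPE;         /* assumed: SB_TYPE is float */
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop);

int main(void)
{
  const int compsize = 64;                      /* interior points per dimension */
  const int dimsize = compsize + 2 * BENCH_RAD; /* plus a radius-4 halo on each side */
  const size_t cells = (size_t)2 * dimsize * dimsize * dimsize; /* two time levels */

  SB_TYPE *A = malloc(cells * sizeof(SB_TYPE));
  if (!A)
    return 1;
  memset(A, 0, cells * sizeof(SB_TYPE));

  /* point source in the middle of time level 0 */
  size_t mid = dimsize / 2;
  A[(mid * (size_t)dimsize + mid) * dimsize + mid] = 1.0f;

  double elapsed = kernel_stencil(A, compsize, 8, false); /* 8 sweeps, OpenMP path */
  (void)elapsed;
  free(A);
  return 0;
}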
nest_lock.c
// RUN: %libomp-compile-and-run | FileCheck %s // REQUIRES: ompt #include "callback.h" #include <omp.h> int main() { //need to use an OpenMP construct so that OMPT will be initalized #pragma omp parallel num_threads(1) print_ids(0); omp_nest_lock_t nest_lock; printf("%" PRIu64 ": &nest_lock: %lli\n", ompt_get_thread_data()->value, (long long) &nest_lock); omp_init_nest_lock(&nest_lock); print_current_address(1); omp_set_nest_lock(&nest_lock); print_current_address(2); omp_set_nest_lock(&nest_lock); print_current_address(3); omp_unset_nest_lock(&nest_lock); print_current_address(4); omp_unset_nest_lock(&nest_lock); print_current_address(5); omp_destroy_nest_lock(&nest_lock); print_current_address(6); // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_nest_lock' // CHECK: 0: NULL_POINTER=[[NULL:.*$]] // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_init_nest_lock: wait_id=[[WAIT_ID:[0-9]+]], hint={{[0-9]+}}, impl={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]] // CHECK-NEXT: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_nest_lock: wait_id=[[WAIT_ID]], hint={{[0-9]+}}, impl={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_acquired_nest_lock_first: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS]] // CHECK-NEXT: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_nest_lock: wait_id=[[WAIT_ID]], hint={{[0-9]+}}, impl={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_acquired_nest_lock_next: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS]] // CHECK-NEXT: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_release_nest_lock_prev: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]] // CHECK-NEXT: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_release_nest_lock_last: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]] // CHECK-NEXT: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_destroy_nest_lock: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]] // CHECK-NEXT: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS]] return 0; }
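The _first/_next and _prev/_last callback pairs checked above mirror the nesting count that a nestable lock keeps for its owning thread. As a small aside, independent of the OMPT test, omp_test_nest_lock makes that count visible: it returns the new nesting depth on success and 0 on failure.

#include <stdio.h>
#include <omp.h>

int main(void)
{
  omp_nest_lock_t lock;
  omp_init_nest_lock(&lock);

  /* The owning thread may re-acquire a nestable lock; each successful
   * omp_test_nest_lock returns the new nesting count. */
  printf("depth after 1st acquire: %d\n", omp_test_nest_lock(&lock)); /* prints 1 */
  printf("depth after 2nd acquire: %d\n", omp_test_nest_lock(&lock)); /* prints 2 */

  omp_unset_nest_lock(&lock); /* back to depth 1 ("release ... prev") */
  omp_unset_nest_lock(&lock); /* depth 0, lock is free ("release ... last") */

  omp_destroy_nest_lock(&lock);
  return 0;
}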
ab-totient-omp-16.c
// Distributed and parallel technologies, Andrew Beveridge, 03/03/2014
// To Compile:    gcc -Wall -O -o ab-totient-omp -fopenmp ab-totient-omp.c
// To Run / Time: /usr/bin/time -v ./ab-totient-omp range_start range_end
#include <stdio.h>
#include <omp.h>

/* When the input is a prime number, the totient is simply the prime number - 1.
   φ(n) is even for all n > 2.
   If n is a positive integer, then φ(n) is the number of integers k in the
   range 1 ≤ k ≤ n for which gcd(n, k) = 1. */
long getTotient(long number)
{
  long result = number;

  // Strip the factor 2 first, then check every odd candidate factor up to the
  // square root for divisibility (a primitive replacement for a list of primes).
  if (number % 2 == 0) {
    result -= result / 2;
    do
      number /= 2;
    while (number % 2 == 0);
  }

  long prime;
  for (prime = 3; prime * prime <= number; prime += 2) {
    if (number % prime == 0) {
      result -= result / prime;
      do
        number /= prime;
      while (number % prime == 0);
    }
  }

  // Last remaining prime factor
  if (number > 1)
    result -= result / number;

  return result;
}

// Main method.
int main(int argc, char **argv)
{
  // Load inputs
  long lower, upper;
  if (argc < 3 || sscanf(argv[1], "%ld", &lower) != 1 || sscanf(argv[2], "%ld", &upper) != 1) {
    fprintf(stderr, "usage: %s range_start range_end\n", argv[0]);
    return 1;
  }

  long i;
  long result = 0;

  // We know the answer if it's 1; no need to execute the function
  if (lower == 1) {
    result = 1;
    lower = 2;
  }

// Sum all totients in the specified range
#pragma omp parallel for default(shared) private(i) schedule(auto) reduction(+:result) num_threads(16)
  for (i = lower; i <= upper; i++) {
    result = result + getTotient(i);
  }

  // Print the result
  printf("Sum of Totients between [%ld..%ld] is %ld \n", lower, upper, result);

  // A-OK!
  return 0;
}
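A few hand-checked values make the factor-stripping logic above easy to verify: φ(12) = 12·(1−1/2)·(1−1/3) = 4, φ(97) = 96 since 97 is prime, and the totients of 1..10 sum to 32. The following small check program is an addition, assuming it is linked against getTotient from the file above.

#include <assert.h>
#include <stdio.h>

long getTotient(long number); /* from ab-totient-omp-16.c above */

int main(void)
{
  assert(getTotient(1) == 1);   /* only k = 1 */
  assert(getTotient(12) == 4);  /* 1, 5, 7, 11 are coprime to 12 */
  assert(getTotient(97) == 96); /* prime: all of 1..96 */

  long sum = 0;
  for (long k = 1; k <= 10; k++)
    sum += getTotient(k);
  assert(sum == 32);            /* matches "./ab-totient-omp 1 10" */

  printf("totient sanity checks passed\n");
  return 0;
}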
CArbitrarySlice.h
/////////////////////////////////////////////////////////////////////////////// // $Id$ // // 3DimViewer // Lightweight 3D DICOM viewer. // // Copyright 2008-2016 3Dim Laboratory s.r.o. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // /////////////////////////////////////////////////////////////////////////////// #ifndef __CARBITRARYSLICE_H__ #define __CARBITRARYSLICE_H__ #include <data/CStorageInterface.h> #include <data/CObjectHolder.h> #include <data/CSlice.h> #include <data/storage_ids_core.h> #include <data/CDensityData.h> #include <app/Signals.h> namespace data { class CArbitrarySlice : public CSlice { public: VPL_SHAREDPTR(CArbitrarySlice); protected: //! Scene voxel size. osg::Vec3 m_VoxelSize; //! Type of interpolation ( can be nearest or bilinear ) TInterpolationType m_InterpolationType; //! Current plane position osg::Vec3 m_center; osg::Vec3 m_normal; osg::Vec3 m_origin; //! Right vector osg::Vec3 m_right; //! Size of slice double m_fWidth, m_fHeight; int m_position; int m_positionMin; int m_positionMax; osg::Matrix m_rotationMatrix; vpl::img::CImage16 m_regionXCoords; vpl::img::CImage16 m_regionYCoords; vpl::img::CImage16 m_regionZCoords; bool m_regionCoordsInitialized; public: //! Constructor. CArbitrarySlice(); //! Destructor. virtual ~CArbitrarySlice(); //! Called upon updating from storage virtual void update(const CChangedEntries& Changes); //! Returns true if changes of a given parent entry may affect this object. bool checkDependency(CStorageEntry* pParent) { return true; } //! Re-initializes the slice. virtual void init(); //! Does object contain relevant data? virtual bool hasData() { return false; } //! Returns voxel parameters const osg::Vec3& getSliceVoxelSize() const { return m_VoxelSize; } void setPosition(double position); //! Return plane position int getPosition() const { return m_position; } int getPositionMax() { return m_positionMax; } int getPositionMin() { return m_positionMin; } void setPlaneCenter(const osg::Vec3& newCenter) { m_center = newCenter; recomputePosition(); } //! Return plane position const osg::Vec3& getPlaneCenter() const { return m_center; } //! Return plane normal const osg::Vec3& getPlaneNormal() const { return m_normal; } //! Return plane position const osg::Vec3& getOrigin() const { return m_origin; } //! Return plane up vector const osg::Vec3& getPlaneRight() const { return m_right; } double getSliceWidth() { return m_fWidth; } void setSliceWidth(double width) { m_fWidth = width; } double getSliceHeight() { return m_fHeight; } void setSliceHeight(double height) { m_fHeight = height; } //! Returns width (x-size) of the original image. virtual vpl::tSize getWidth() const; //! Returns height (y-size) of the original image. 
virtual vpl::tSize getHeight() const; void setRotationMatrix(const osg::Matrix& matrix) { m_rotationMatrix = matrix; recomputePosition(); } const osg::Matrix& getRotationMatrix() { return m_rotationMatrix; } bool computeSamplingParameters(osg::Vec3& outPosition, osg::Vec3& outVec1, osg::Vec3& outVec2, const CChangedEntries* Changes = NULL); template<class VolumeType, class SliceType> bool updateProperty(VolumeType* volume, SliceType* slice) { osg::Vec3 realPosition; osg::Vec3 vec1; osg::Vec3 vec2; if (!computeSamplingParameters(realPosition, vec1, vec2)) { return false; } data::CObjectPtr<data::CDensityData> spDensityVolume(APP_STORAGE.getEntry(VPL_SIGNAL(SigGetActiveDataSet).invoke2())); // Calculate voxel size #if(0) osg::Vec3 v2(spDensityVolume->getDX() * vec2[0], spDensityVolume->getDY() * vec2[1], spDensityVolume->getDZ() * vec2[2]); m_VoxelSize[0] = v2.length(); osg::Vec3 v1(spDensityVolume->getDX() * vec1[0], spDensityVolume->getDY() * vec1[1], spDensityVolume->getDZ() * vec1[2]); m_VoxelSize[1] = v1.length(); #else osg::Vec3f voxelSizeB = osg::Vec3f(1 / spDensityVolume->getDX(), 1 / spDensityVolume->getDY(), 1 / spDensityVolume->getDZ()); osg::Vec3f vvec1 = osg::componentMultiply(voxelSizeB, vec1); vvec1.normalize(); osg::Vec3f vvec2 = osg::componentMultiply(voxelSizeB, vec2); vvec2.normalize(); osg::Vec3 v2(spDensityVolume->getDX() * vvec2[0], spDensityVolume->getDY() * vvec2[1], spDensityVolume->getDZ() * vvec2[2]); m_VoxelSize[0] = v2.length(); osg::Vec3 v1(spDensityVolume->getDX() * vvec1[0], spDensityVolume->getDY() * vvec1[1], spDensityVolume->getDZ() * vvec1[2]); m_VoxelSize[1] = v1.length(); #endif if (0 == m_VoxelSize[0] || 0 == m_VoxelSize[1]) { return false; } // hotfix for extra small voxel sizes to avoid excessive memory requirements m_VoxelSize[0] = std::max(0.01f, m_VoxelSize[0]); m_VoxelSize[1] = std::max(0.01f, m_VoxelSize[1]); // convert to volume coordinates data::CCoordinatesConv CoordConv = VPL_SIGNAL(SigGetActiveConvObject).invoke2(); // Calculate voxel size of slice //osg::Vec3d voxelSize = osg::Vec3d(spDensityVolume->getDX(), spDensityVolume->getDY(), spDensityVolume->getDZ()); double plengthW = m_fWidth / m_VoxelSize[0]; double plengthH = m_fHeight / m_VoxelSize[1]; // initialize parameter along the slice //double tW = -(plengthW - 1) * 0.5, // tH = -(plengthH - 1) * 0.5; // new size of the slice vpl::tSize Width = static_cast<vpl::tSize>(plengthW); vpl::tSize Height = static_cast<vpl::tSize>(plengthH); double cW = (Width - 1) * 0.5; double cH = (Height - 1) * 0.5; slice->resize(Width, Height); osg::Vec3 positionR; positionR[0] = CoordConv.fromRealXd(realPosition[0]) + 0.001; positionR[1] = CoordConv.fromRealYd(realPosition[1]) + 0.001; positionR[2] = CoordConv.fromRealZd(realPosition[2]) + 0.001; #pragma omp parallel for for (vpl::tSize i = 0; i < Width; i++) { for (vpl::tSize j = 0; j < Height; j++) { // compute point on the slice osg::Vec3d point = positionR + vvec2 * (i - cW) + vvec1 * (j - cH); point[2] += m_VoxelSize[2] * 0.5; if (!(point[0] < 0 || point[0] >= volume->getXSize() || point[1] < 0 || point[1] >= volume->getYSize() || point[2] < 0 || point[2] >= volume->getZSize())) { vpl::img::CPoint3D p(point[0], point[1], point[2]); vpl::tSize xx, yy, zz; xx = static_cast<vpl::tSize>(point[0]); yy = static_cast<vpl::tSize>(point[1]); zz = static_cast<vpl::tSize>(point[2]); (*slice)(i, j) = volume->at(xx, yy, zz); } else { (*slice)(i, j) = 0; } } } return true; } protected: //! 
Update slice texture void updateTextureData(const osg::Vec3& position, const osg::Vec3& vec1, const osg::Vec3& vec2, bool updateDensityImage = true, bool updateRegionImage = true); void recomputePosition(); }; namespace Storage { //! Storage identifier of arbitrary slice DECLARE_OBJECT(ArbitrarySlice, data::CArbitrarySlice, CORE_STORAGE_SLICE_ARB_ID); } } // namespace data #endif // __CARBITRARYSLICE_H__
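/* Illustrative sketch (not part of the original 3DimViewer sources): the
   updateProperty() template above resamples the density volume along an
   arbitrary plane using nearest-neighbour lookup.  The standalone function
   below shows the same core loop on a plain float volume; the names
   (Vec3, Volume, sampleObliqueSlice) are hypothetical, and voxel-size
   scaling and real-to-volume coordinate conversion are deliberately
   omitted for brevity. */
#include <cstddef>
#include <vector>

struct Vec3 { double x, y, z; };

struct Volume
{
    std::size_t xs, ys, zs;          // volume dimensions in voxels
    std::vector<float> data;         // xs * ys * zs samples, x fastest
    float at(std::size_t x, std::size_t y, std::size_t z) const
    { return data[(z * ys + y) * xs + x]; }
};

// Sample a width x height slice centred at 'center' and spanned by the
// direction vectors 'u' and 'v' (one voxel step each), writing 0 for
// points that fall outside the volume, as CArbitrarySlice does.
std::vector<float> sampleObliqueSlice(const Volume& vol, Vec3 center,
                                      Vec3 u, Vec3 v,
                                      std::size_t width, std::size_t height)
{
    std::vector<float> slice(width * height, 0.0f);
    const double cW = (width - 1) * 0.5;
    const double cH = (height - 1) * 0.5;
    for (std::size_t i = 0; i < width; ++i)
    {
        for (std::size_t j = 0; j < height; ++j)
        {
            // point on the plane for pixel (i, j)
            const double x = center.x + u.x * (i - cW) + v.x * (j - cH);
            const double y = center.y + u.y * (i - cW) + v.y * (j - cH);
            const double z = center.z + u.z * (i - cW) + v.z * (j - cH);
            if (x >= 0 && y >= 0 && z >= 0 &&
                x < vol.xs && y < vol.ys && z < vol.zs)
            {
                // nearest-neighbour lookup, truncating towards zero
                slice[j * width + i] = vol.at(static_cast<std::size_t>(x),
                                              static_cast<std::size_t>(y),
                                              static_cast<std::size_t>(z));
            }
        }
    }
    return slice;
}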
cp-tree.h
/* Definitions for C++ parsing and type checking. Copyright (C) 1987, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc. Contributed by Michael Tiemann (tiemann@cygnus.com) This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #ifndef GCC_CP_TREE_H #define GCC_CP_TREE_H #include "ggc.h" #include "function.h" #include "hashtab.h" #include "vec.h" /* In order for the format checking to accept the C++ front end diagnostic framework extensions, you must include this file before diagnostic-core.h, not after. We override the definition of GCC_DIAG_STYLE in c-common.h. */ #undef GCC_DIAG_STYLE #define GCC_DIAG_STYLE __gcc_cxxdiag__ #if defined(GCC_DIAGNOSTIC_CORE_H) || defined (GCC_C_COMMON_H) #error \ In order for the format checking to accept the C++ front end diagnostic \ framework extensions, you must include this file before diagnostic-core.h and \ c-common.h, not after. #endif #include "c-family/c-common.h" #include "diagnostic.h" #include "name-lookup.h" /* Usage of TREE_LANG_FLAG_?: 0: IDENTIFIER_MARKED (IDENTIFIER_NODEs) NEW_EXPR_USE_GLOBAL (in NEW_EXPR). DELETE_EXPR_USE_GLOBAL (in DELETE_EXPR). COMPOUND_EXPR_OVERLOADED (in COMPOUND_EXPR). TREE_INDIRECT_USING (in NAMESPACE_DECL). CLEANUP_P (in TRY_BLOCK) AGGR_INIT_VIA_CTOR_P (in AGGR_INIT_EXPR) PTRMEM_OK_P (in ADDR_EXPR, OFFSET_REF, SCOPE_REF) PAREN_STRING_LITERAL (in STRING_CST) DECL_PRETTY_FUNCTION_P (in VAR_DECL) KOENIG_LOOKUP_P (in CALL_EXPR) STATEMENT_LIST_NO_SCOPE (in STATEMENT_LIST). EXPR_STMT_STMT_EXPR_RESULT (in EXPR_STMT) STMT_EXPR_NO_SCOPE (in STMT_EXPR) BIND_EXPR_TRY_BLOCK (in BIND_EXPR) TYPENAME_IS_ENUM_P (in TYPENAME_TYPE) OMP_FOR_GIMPLIFYING_P (in OMP_FOR) BASELINK_QUALIFIED_P (in BASELINK) TARGET_EXPR_IMPLICIT_P (in TARGET_EXPR) TEMPLATE_PARM_PARAMETER_PACK (in TEMPLATE_PARM_INDEX) ATTR_IS_DEPENDENT (in the TREE_LIST for an attribute) CONSTRUCTOR_IS_DIRECT_INIT (in CONSTRUCTOR) LAMBDA_EXPR_CAPTURES_THIS_P (in LAMBDA_EXPR) DECLTYPE_FOR_LAMBDA_CAPTURE (in DECLTYPE_TYPE) VEC_INIT_EXPR_IS_CONSTEXPR (in VEC_INIT_EXPR) DECL_OVERRIDE_P (in FUNCTION_DECL) IMPLICIT_CONV_EXPR_DIRECT_INIT (in IMPLICIT_CONV_EXPR) TRANSACTION_EXPR_IS_STMT (in TRANSACTION_EXPR) CONVERT_EXPR_VBASE_PATH (in CONVERT_EXPR) OVL_ARG_DEPENDENT (in OVERLOAD) PACK_EXPANSION_LOCAL_P (in *_PACK_EXPANSION) 1: IDENTIFIER_VIRTUAL_P (in IDENTIFIER_NODE) TI_PENDING_TEMPLATE_FLAG. TEMPLATE_PARMS_FOR_INLINE. DELETE_EXPR_USE_VEC (in DELETE_EXPR). (TREE_CALLS_NEW) (in _EXPR or _REF) (commented-out). 
ICS_ELLIPSIS_FLAG (in _CONV) DECL_INITIALIZED_P (in VAR_DECL) TYPENAME_IS_CLASS_P (in TYPENAME_TYPE) STMT_IS_FULL_EXPR_P (in _STMT) TARGET_EXPR_LIST_INIT_P (in TARGET_EXPR) LAMBDA_EXPR_MUTABLE_P (in LAMBDA_EXPR) DECL_FINAL_P (in FUNCTION_DECL) QUALIFIED_NAME_IS_TEMPLATE (in SCOPE_REF) 2: IDENTIFIER_OPNAME_P (in IDENTIFIER_NODE) ICS_THIS_FLAG (in _CONV) DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (in VAR_DECL) STATEMENT_LIST_TRY_BLOCK (in STATEMENT_LIST) TYPENAME_IS_RESOLVING_P (in TYPE_NAME_TYPE) LAMBDA_EXPR_DEDUCE_RETURN_TYPE_P (in LAMBDA_EXPR) TARGET_EXPR_DIRECT_INIT_P (in TARGET_EXPR) 3: (TREE_REFERENCE_EXPR) (in NON_LVALUE_EXPR) (commented-out). ICS_BAD_FLAG (in _CONV) FN_TRY_BLOCK_P (in TRY_BLOCK) IDENTIFIER_CTOR_OR_DTOR_P (in IDENTIFIER_NODE) BIND_EXPR_BODY_BLOCK (in BIND_EXPR) DECL_NON_TRIVIALLY_INITIALIZED_P (in VAR_DECL) 4: TREE_HAS_CONSTRUCTOR (in INDIRECT_REF, SAVE_EXPR, CONSTRUCTOR, or FIELD_DECL). IDENTIFIER_TYPENAME_P (in IDENTIFIER_NODE) DECL_TINFO_P (in VAR_DECL) 5: C_IS_RESERVED_WORD (in IDENTIFIER_NODE) DECL_VTABLE_OR_VTT_P (in VAR_DECL) 6: IDENTIFIER_REPO_CHOSEN (in IDENTIFIER_NODE) DECL_CONSTRUCTION_VTABLE_P (in VAR_DECL) TYPE_MARKED_P (in _TYPE) Usage of TYPE_LANG_FLAG_?: 0: TYPE_DEPENDENT_P 1: TYPE_HAS_USER_CONSTRUCTOR. 2: unused 3: TYPE_FOR_JAVA. 4: TYPE_HAS_NONTRIVIAL_DESTRUCTOR 5: CLASS_TYPE_P (in RECORD_TYPE and UNION_TYPE) ENUM_FIXED_UNDERLYING_TYPE_P (in ENUMERAL_TYPE) 6: TYPE_DEPENDENT_P_VALID Usage of DECL_LANG_FLAG_?: 0: DECL_ERROR_REPORTED (in VAR_DECL). DECL_TEMPLATE_PARM_P (in PARM_DECL, CONST_DECL, TYPE_DECL, or TEMPLATE_DECL) DECL_LOCAL_FUNCTION_P (in FUNCTION_DECL) DECL_MUTABLE_P (in FIELD_DECL) DECL_DEPENDENT_P (in USING_DECL) 1: C_TYPEDEF_EXPLICITLY_SIGNED (in TYPE_DECL). DECL_TEMPLATE_INSTANTIATED (in a VAR_DECL or a FUNCTION_DECL) DECL_MEMBER_TEMPLATE_P (in TEMPLATE_DECL) FUNCTION_PARAMETER_PACK_P (in PARM_DECL) USING_DECL_TYPENAME_P (in USING_DECL) 2: DECL_THIS_EXTERN (in VAR_DECL or FUNCTION_DECL). DECL_IMPLICIT_TYPEDEF_P (in a TYPE_DECL) 3: DECL_IN_AGGR_P. 4: DECL_C_BIT_FIELD (in a FIELD_DECL) DECL_ANON_UNION_VAR_P (in a VAR_DECL) DECL_SELF_REFERENCE_P (in a TYPE_DECL) DECL_INVALID_OVERRIDER_P (in a FUNCTION_DECL) 5: DECL_INTERFACE_KNOWN. 6: DECL_THIS_STATIC (in VAR_DECL or FUNCTION_DECL). DECL_FIELD_IS_BASE (in FIELD_DECL) TYPE_DECL_ALIAS_P (in TYPE_DECL) 7: DECL_DEAD_FOR_LOCAL (in VAR_DECL). DECL_THUNK_P (in a member FUNCTION_DECL) DECL_NORMAL_CAPTURE_P (in FIELD_DECL) 8: DECL_DECLARED_CONSTEXPR_P (in VAR_DECL, FUNCTION_DECL) Usage of language-independent fields in a language-dependent manner: TYPE_ALIAS_SET This field is used by TYPENAME_TYPEs, TEMPLATE_TYPE_PARMs, and so forth as a substitute for the mark bits provided in `lang_type'. At present, only the six low-order bits are used. TYPE_LANG_SLOT_1 For an ENUMERAL_TYPE, this is ENUM_TEMPLATE_INFO. For a FUNCTION_TYPE or METHOD_TYPE, this is TYPE_RAISES_EXCEPTIONS BINFO_VIRTUALS For a binfo, this is a TREE_LIST. There is an entry for each virtual function declared either in BINFO or its direct and indirect primary bases. The BV_DELTA of each node gives the amount by which to adjust the `this' pointer when calling the function. If the method is an overridden version of a base class method, then it is assumed that, prior to adjustment, the this pointer points to an object of the base class. The BV_VCALL_INDEX of each node, if non-NULL, gives the vtable index of the vcall offset for this entry. The BV_FN is the declaration for the virtual function itself. 
If BV_LOST_PRIMARY is set, it means that this entry is for a lost primary virtual base and can be left null in the vtable. BINFO_VTABLE This is an expression with POINTER_TYPE that gives the value to which the vptr should be initialized. Use get_vtbl_decl_for_binfo to extract the VAR_DECL for the complete vtable. DECL_VINDEX This field is NULL for a non-virtual function. For a virtual function, it is eventually set to an INTEGER_CST indicating the index in the vtable at which this function can be found. When a virtual function is declared, but before it is known what function is overridden, this field is the error_mark_node. Temporarily, it may be set to a TREE_LIST whose TREE_VALUE is the virtual function this one overrides, and whose TREE_CHAIN is the old DECL_VINDEX. */ /* Language-specific tree checkers. */ #define VAR_OR_FUNCTION_DECL_CHECK(NODE) \ TREE_CHECK2(NODE,VAR_DECL,FUNCTION_DECL) #define VAR_FUNCTION_OR_PARM_DECL_CHECK(NODE) \ TREE_CHECK3(NODE,VAR_DECL,FUNCTION_DECL,PARM_DECL) #define VAR_TEMPL_TYPE_OR_FUNCTION_DECL_CHECK(NODE) \ TREE_CHECK4(NODE,VAR_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL) #define VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK(NODE) \ TREE_CHECK5(NODE,VAR_DECL,FIELD_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL) #define BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK(NODE) \ TREE_CHECK(NODE,BOUND_TEMPLATE_TEMPLATE_PARM) #if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007) #define THUNK_FUNCTION_CHECK(NODE) __extension__ \ ({ __typeof (NODE) const __t = (NODE); \ if (TREE_CODE (__t) != FUNCTION_DECL || !__t->decl_common.lang_specific \ || !__t->decl_common.lang_specific->u.fn.thunk_p) \ tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, 0); \ __t; }) #else #define THUNK_FUNCTION_CHECK(NODE) (NODE) #endif /* Language-dependent contents of an identifier. */ struct GTY(()) lang_identifier { struct c_common_identifier c_common; cxx_binding *namespace_bindings; cxx_binding *bindings; tree class_template_info; tree label_value; }; /* In an IDENTIFIER_NODE, nonzero if this identifier is actually a keyword. C_RID_CODE (node) is then the RID_* value of the keyword, and C_RID_YYCODE is the token number wanted by Yacc. */ #define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_5 (ID) #define LANG_IDENTIFIER_CAST(NODE) \ ((struct lang_identifier*)IDENTIFIER_NODE_CHECK (NODE)) struct GTY(()) template_parm_index_s { struct tree_common common; int index; int level; int orig_level; int num_siblings; tree decl; }; typedef struct template_parm_index_s template_parm_index; struct GTY(()) ptrmem_cst { struct tree_common common; tree member; }; typedef struct ptrmem_cst * ptrmem_cst_t; #define IDENTIFIER_GLOBAL_VALUE(NODE) \ namespace_binding ((NODE), global_namespace) #define SET_IDENTIFIER_GLOBAL_VALUE(NODE, VAL) \ set_namespace_binding ((NODE), global_namespace, (VAL)) #define IDENTIFIER_NAMESPACE_VALUE(NODE) \ namespace_binding ((NODE), current_namespace) #define SET_IDENTIFIER_NAMESPACE_VALUE(NODE, VAL) \ set_namespace_binding ((NODE), current_namespace, (VAL)) #define CLEANUP_P(NODE) TREE_LANG_FLAG_0 (TRY_BLOCK_CHECK (NODE)) #define BIND_EXPR_TRY_BLOCK(NODE) \ TREE_LANG_FLAG_0 (BIND_EXPR_CHECK (NODE)) /* Used to mark the block around the member initializers and cleanups. 
*/ #define BIND_EXPR_BODY_BLOCK(NODE) \ TREE_LANG_FLAG_3 (BIND_EXPR_CHECK (NODE)) #define FUNCTION_NEEDS_BODY_BLOCK(NODE) \ (DECL_CONSTRUCTOR_P (NODE) || DECL_DESTRUCTOR_P (NODE) \ || LAMBDA_FUNCTION_P (NODE)) #define STATEMENT_LIST_NO_SCOPE(NODE) \ TREE_LANG_FLAG_0 (STATEMENT_LIST_CHECK (NODE)) #define STATEMENT_LIST_TRY_BLOCK(NODE) \ TREE_LANG_FLAG_2 (STATEMENT_LIST_CHECK (NODE)) /* Nonzero if this statement should be considered a full-expression, i.e., if temporaries created during this statement should have their destructors run at the end of this statement. */ #define STMT_IS_FULL_EXPR_P(NODE) TREE_LANG_FLAG_1 ((NODE)) /* Marks the result of a statement expression. */ #define EXPR_STMT_STMT_EXPR_RESULT(NODE) \ TREE_LANG_FLAG_0 (EXPR_STMT_CHECK (NODE)) /* Nonzero if this statement-expression does not have an associated scope. */ #define STMT_EXPR_NO_SCOPE(NODE) \ TREE_LANG_FLAG_0 (STMT_EXPR_CHECK (NODE)) /* Returns nonzero iff TYPE1 and TYPE2 are the same type, in the usual sense of `same'. */ #define same_type_p(TYPE1, TYPE2) \ comptypes ((TYPE1), (TYPE2), COMPARE_STRICT) /* Returns nonzero iff NODE is a declaration for the global function `main'. */ #define DECL_MAIN_P(NODE) \ (DECL_EXTERN_C_FUNCTION_P (NODE) \ && DECL_NAME (NODE) != NULL_TREE \ && MAIN_NAME_P (DECL_NAME (NODE)) \ && flag_hosted) /* The overloaded FUNCTION_DECL. */ #define OVL_FUNCTION(NODE) \ (((struct tree_overload*)OVERLOAD_CHECK (NODE))->function) #define OVL_CHAIN(NODE) TREE_CHAIN (NODE) /* Polymorphic access to FUNCTION and CHAIN. */ #define OVL_CURRENT(NODE) \ ((TREE_CODE (NODE) == OVERLOAD) ? OVL_FUNCTION (NODE) : (NODE)) #define OVL_NEXT(NODE) \ ((TREE_CODE (NODE) == OVERLOAD) ? TREE_CHAIN (NODE) : NULL_TREE) /* If set, this was imported in a using declaration. This is not to confuse with being used somewhere, which is not important for this node. */ #define OVL_USED(NODE) TREE_USED (NODE) /* If set, this OVERLOAD was created for argument-dependent lookup and can be freed afterward. */ #define OVL_ARG_DEPENDENT(NODE) TREE_LANG_FLAG_0 (OVERLOAD_CHECK (NODE)) struct GTY(()) tree_overload { struct tree_common common; tree function; }; /* Returns true iff NODE is a BASELINK. */ #define BASELINK_P(NODE) \ (TREE_CODE (NODE) == BASELINK) /* The BINFO indicating the base from which the BASELINK_FUNCTIONS came. */ #define BASELINK_BINFO(NODE) \ (((struct tree_baselink*) BASELINK_CHECK (NODE))->binfo) /* The functions referred to by the BASELINK; either a FUNCTION_DECL, a TEMPLATE_DECL, an OVERLOAD, or a TEMPLATE_ID_EXPR. */ #define BASELINK_FUNCTIONS(NODE) \ (((struct tree_baselink*) BASELINK_CHECK (NODE))->functions) /* The BINFO in which the search for the functions indicated by this baselink began. This base is used to determine the accessibility of functions selected by overload resolution. */ #define BASELINK_ACCESS_BINFO(NODE) \ (((struct tree_baselink*) BASELINK_CHECK (NODE))->access_binfo) /* For a type-conversion operator, the BASELINK_OPTYPE indicates the type to which the conversion should occur. This value is important if the BASELINK_FUNCTIONS include a template conversion operator -- the BASELINK_OPTYPE can be used to determine what type the user requested. */ #define BASELINK_OPTYPE(NODE) \ (TREE_CHAIN (BASELINK_CHECK (NODE))) /* Nonzero if this baselink was from a qualified lookup. 
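   For example, a call written as `obj.Base::f ()' names the function with a
   qualified-id, so the resulting baselink comes from a qualified lookup,
   whereas plain `obj.f ()' does not.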
*/ #define BASELINK_QUALIFIED_P(NODE) \ TREE_LANG_FLAG_0 (BASELINK_CHECK (NODE)) struct GTY(()) tree_baselink { struct tree_common common; tree binfo; tree functions; tree access_binfo; }; /* The different kinds of ids that we encounter. */ typedef enum cp_id_kind { /* Not an id at all. */ CP_ID_KIND_NONE, /* An unqualified-id that is not a template-id. */ CP_ID_KIND_UNQUALIFIED, /* An unqualified-id that is a dependent name. */ CP_ID_KIND_UNQUALIFIED_DEPENDENT, /* An unqualified template-id. */ CP_ID_KIND_TEMPLATE_ID, /* A qualified-id. */ CP_ID_KIND_QUALIFIED } cp_id_kind; /* The various kinds of C++0x warnings we encounter. */ typedef enum cpp0x_warn_str { /* extended initializer lists */ CPP0X_INITIALIZER_LISTS, /* explicit conversion operators */ CPP0X_EXPLICIT_CONVERSION, /* variadic templates */ CPP0X_VARIADIC_TEMPLATES, /* lambda expressions */ CPP0X_LAMBDA_EXPR, /* C++0x auto */ CPP0X_AUTO, /* scoped enums */ CPP0X_SCOPED_ENUMS, /* defaulted and deleted functions */ CPP0X_DEFAULTED_DELETED, /* inline namespaces */ CPP0X_INLINE_NAMESPACES, /* override controls, override/final */ CPP0X_OVERRIDE_CONTROLS, /* non-static data member initializers */ CPP0X_NSDMI, /* user defined literals */ CPP0X_USER_DEFINED_LITERALS, /* delegating constructors */ CPP0X_DELEGATING_CTORS } cpp0x_warn_str; /* The various kinds of operation used by composite_pointer_type. */ typedef enum composite_pointer_operation { /* comparison */ CPO_COMPARISON, /* conversion */ CPO_CONVERSION, /* conditional expression */ CPO_CONDITIONAL_EXPR } composite_pointer_operation; /* Possible cases of expression list used by build_x_compound_expr_from_list. */ typedef enum expr_list_kind { ELK_INIT, /* initializer */ ELK_MEM_INIT, /* member initializer */ ELK_FUNC_CAST /* functional cast */ } expr_list_kind; /* Possible cases of implicit bad rhs conversions. */ typedef enum impl_conv_rhs { ICR_DEFAULT_ARGUMENT, /* default argument */ ICR_CONVERTING, /* converting */ ICR_INIT, /* initialization */ ICR_ARGPASS, /* argument passing */ ICR_RETURN, /* return */ ICR_ASSIGN /* assignment */ } impl_conv_rhs; /* Possible cases of implicit or explicit bad conversions to void. */ typedef enum impl_conv_void { ICV_CAST, /* (explicit) conversion to void */ ICV_SECOND_OF_COND, /* second operand of conditional expression */ ICV_THIRD_OF_COND, /* third operand of conditional expression */ ICV_RIGHT_OF_COMMA, /* right operand of comma operator */ ICV_LEFT_OF_COMMA, /* left operand of comma operator */ ICV_STATEMENT, /* statement */ ICV_THIRD_IN_FOR /* for increment expression */ } impl_conv_void; /* Macros for access to language-specific slots in an identifier. */ #define IDENTIFIER_NAMESPACE_BINDINGS(NODE) \ (LANG_IDENTIFIER_CAST (NODE)->namespace_bindings) #define IDENTIFIER_TEMPLATE(NODE) \ (LANG_IDENTIFIER_CAST (NODE)->class_template_info) /* The IDENTIFIER_BINDING is the innermost cxx_binding for the identifier. It's PREVIOUS is the next outermost binding. Each VALUE field is a DECL for the associated declaration. Thus, name lookup consists simply of pulling off the node at the front of the list (modulo oddities for looking up the names of types, and such.) You can use SCOPE field to determine the scope that bound the name. */ #define IDENTIFIER_BINDING(NODE) \ (LANG_IDENTIFIER_CAST (NODE)->bindings) /* TREE_TYPE only indicates on local and class scope the current type. For namespace scope, the presence of a type in any namespace is indicated with global_type_node, and the real type behind must be found through lookup. 
*/ #define IDENTIFIER_TYPE_VALUE(NODE) identifier_type_value (NODE) #define REAL_IDENTIFIER_TYPE_VALUE(NODE) TREE_TYPE (NODE) #define SET_IDENTIFIER_TYPE_VALUE(NODE,TYPE) (TREE_TYPE (NODE) = (TYPE)) #define IDENTIFIER_HAS_TYPE_VALUE(NODE) (IDENTIFIER_TYPE_VALUE (NODE) ? 1 : 0) #define IDENTIFIER_LABEL_VALUE(NODE) \ (LANG_IDENTIFIER_CAST (NODE)->label_value) #define SET_IDENTIFIER_LABEL_VALUE(NODE, VALUE) \ IDENTIFIER_LABEL_VALUE (NODE) = (VALUE) /* Nonzero if this identifier is used as a virtual function name somewhere (optimizes searches). */ #define IDENTIFIER_VIRTUAL_P(NODE) TREE_LANG_FLAG_1 (NODE) /* Nonzero if this identifier is the prefix for a mangled C++ operator name. */ #define IDENTIFIER_OPNAME_P(NODE) TREE_LANG_FLAG_2 (NODE) /* Nonzero if this identifier is the name of a type-conversion operator. */ #define IDENTIFIER_TYPENAME_P(NODE) \ TREE_LANG_FLAG_4 (NODE) /* Nonzero if this identifier is the name of a constructor or destructor. */ #define IDENTIFIER_CTOR_OR_DTOR_P(NODE) \ TREE_LANG_FLAG_3 (NODE) /* True iff NAME is the DECL_ASSEMBLER_NAME for an entity with vague linkage which the prelinker has assigned to this translation unit. */ #define IDENTIFIER_REPO_CHOSEN(NAME) \ (TREE_LANG_FLAG_6 (NAME)) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */ #define C_TYPE_FIELDS_READONLY(TYPE) \ (LANG_TYPE_CLASS_CHECK (TYPE)->fields_readonly) /* The tokens stored in the default argument. */ #define DEFARG_TOKENS(NODE) \ (((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->tokens) #define DEFARG_INSTANTIATIONS(NODE) \ (((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->instantiations) struct GTY (()) tree_default_arg { struct tree_common common; struct cp_token_cache *tokens; VEC(tree,gc) *instantiations; }; #define DEFERRED_NOEXCEPT_PATTERN(NODE) \ (((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->pattern) #define DEFERRED_NOEXCEPT_ARGS(NODE) \ (((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->args) #define DEFERRED_NOEXCEPT_SPEC_P(NODE) \ ((NODE) && (TREE_PURPOSE (NODE)) \ && (TREE_CODE (TREE_PURPOSE (NODE)) == DEFERRED_NOEXCEPT \ || is_overloaded_fn (TREE_PURPOSE (NODE)))) struct GTY (()) tree_deferred_noexcept { struct tree_base base; tree pattern; tree args; }; /* The condition associated with the static assertion. This must be an integral constant expression. */ #define STATIC_ASSERT_CONDITION(NODE) \ (((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->condition) /* The message associated with the static assertion. This must be a string constant, which will be emitted as an error message when the static assert condition is false. */ #define STATIC_ASSERT_MESSAGE(NODE) \ (((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->message) /* Source location information for a static assertion. */ #define STATIC_ASSERT_SOURCE_LOCATION(NODE) \ (((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->location) struct GTY (()) tree_static_assert { struct tree_common common; tree condition; tree message; location_t location; }; struct GTY (()) tree_argument_pack_select { struct tree_common common; tree argument_pack; int index; }; /* The different kinds of traits that we encounter. 
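   For example, the built-in traits `__is_class (T)' and
   `__underlying_type (T)' are represented with CPTK_IS_CLASS and
   CPTK_UNDERLYING_TYPE, respectively.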
*/ typedef enum cp_trait_kind { CPTK_BASES, CPTK_DIRECT_BASES, CPTK_HAS_NOTHROW_ASSIGN, CPTK_HAS_NOTHROW_CONSTRUCTOR, CPTK_HAS_NOTHROW_COPY, CPTK_HAS_TRIVIAL_ASSIGN, CPTK_HAS_TRIVIAL_CONSTRUCTOR, CPTK_HAS_TRIVIAL_COPY, CPTK_HAS_TRIVIAL_DESTRUCTOR, CPTK_HAS_VIRTUAL_DESTRUCTOR, CPTK_IS_ABSTRACT, CPTK_IS_BASE_OF, CPTK_IS_CLASS, CPTK_IS_CONVERTIBLE_TO, CPTK_IS_EMPTY, CPTK_IS_ENUM, CPTK_IS_FINAL, CPTK_IS_LITERAL_TYPE, CPTK_IS_POD, CPTK_IS_POLYMORPHIC, CPTK_IS_STD_LAYOUT, CPTK_IS_TRIVIAL, CPTK_IS_UNION, CPTK_UNDERLYING_TYPE } cp_trait_kind; /* The types that we are processing. */ #define TRAIT_EXPR_TYPE1(NODE) \ (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type1) #define TRAIT_EXPR_TYPE2(NODE) \ (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type2) /* The specific trait that we are processing. */ #define TRAIT_EXPR_KIND(NODE) \ (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->kind) struct GTY (()) tree_trait_expr { struct tree_common common; tree type1; tree type2; enum cp_trait_kind kind; }; /* Based off of TYPE_ANONYMOUS_P. */ #define LAMBDA_TYPE_P(NODE) \ (CLASS_TYPE_P (NODE) && LAMBDANAME_P (TYPE_LINKAGE_IDENTIFIER (NODE))) /* Test if FUNCTION_DECL is a lambda function. */ #define LAMBDA_FUNCTION_P(FNDECL) \ (DECL_OVERLOADED_OPERATOR_P (FNDECL) == CALL_EXPR \ && LAMBDA_TYPE_P (CP_DECL_CONTEXT (FNDECL))) enum cp_lambda_default_capture_mode_type { CPLD_NONE, CPLD_COPY, CPLD_REFERENCE }; /* The method of default capture, if any. */ #define LAMBDA_EXPR_DEFAULT_CAPTURE_MODE(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->default_capture_mode) /* The capture-list, including `this'. Each capture is stored as a FIELD_DECL * so that the name, type, and field are all together, whether or not it has * been added to the lambda's class type. TREE_LIST: TREE_PURPOSE: The FIELD_DECL for this capture. TREE_VALUE: The initializer. This is part of a GNU extension. */ #define LAMBDA_EXPR_CAPTURE_LIST(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->capture_list) /* During parsing of the lambda, the node in the capture-list that holds the 'this' capture. */ #define LAMBDA_EXPR_THIS_CAPTURE(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->this_capture) /* Predicate tracking whether `this' is in the effective capture set. */ #define LAMBDA_EXPR_CAPTURES_THIS_P(NODE) \ LAMBDA_EXPR_THIS_CAPTURE(NODE) /* Predicate tracking whether the lambda was declared 'mutable'. */ #define LAMBDA_EXPR_MUTABLE_P(NODE) \ TREE_LANG_FLAG_1 (LAMBDA_EXPR_CHECK (NODE)) /* True iff we should try to deduce the lambda return type from any return statement. */ #define LAMBDA_EXPR_DEDUCE_RETURN_TYPE_P(NODE) \ TREE_LANG_FLAG_2 (LAMBDA_EXPR_CHECK (NODE)) /* The return type in the expression. * NULL_TREE indicates that none was specified. */ #define LAMBDA_EXPR_RETURN_TYPE(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->return_type) /* The source location of the lambda. */ #define LAMBDA_EXPR_LOCATION(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->locus) /* The mangling scope for the lambda: FUNCTION_DECL, PARM_DECL, VAR_DECL, FIELD_DECL or NULL_TREE. If this is NULL_TREE, we have no linkage. */ #define LAMBDA_EXPR_EXTRA_SCOPE(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->extra_scope) /* If EXTRA_SCOPE, this is the number of the lambda within that scope. 
*/ #define LAMBDA_EXPR_DISCRIMINATOR(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->discriminator) /* During parsing of the lambda, a vector of capture proxies which need to be pushed once we're done processing a nested lambda. */ #define LAMBDA_EXPR_PENDING_PROXIES(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->pending_proxies) /* The closure type of the lambda. Note that the TREE_TYPE of a LAMBDA_EXPR is always NULL_TREE, because we need to instantiate the LAMBDA_EXPR in order to instantiate the type. */ #define LAMBDA_EXPR_CLOSURE(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->closure) struct GTY (()) tree_lambda_expr { struct tree_typed typed; tree capture_list; tree this_capture; tree return_type; tree extra_scope; tree closure; VEC(tree,gc)* pending_proxies; location_t locus; enum cp_lambda_default_capture_mode_type default_capture_mode; int discriminator; }; /* A (typedef,context,usage location) triplet. It represents a typedef used through a context at a given source location. e.g. struct foo { typedef int myint; }; struct bar { foo::myint v; // #1<-- this location. }; In bar, the triplet will be (myint, foo, #1). */ struct GTY(()) qualified_typedef_usage_s { tree typedef_decl; tree context; location_t locus; }; typedef struct qualified_typedef_usage_s qualified_typedef_usage_t; DEF_VEC_O (qualified_typedef_usage_t); DEF_VEC_ALLOC_O (qualified_typedef_usage_t,gc); struct GTY(()) tree_template_info { struct tree_common common; VEC(qualified_typedef_usage_t,gc) *typedefs_needing_access_checking; }; enum cp_tree_node_structure_enum { TS_CP_GENERIC, TS_CP_IDENTIFIER, TS_CP_TPI, TS_CP_PTRMEM, TS_CP_BINDING, TS_CP_OVERLOAD, TS_CP_BASELINK, TS_CP_WRAPPER, TS_CP_DEFAULT_ARG, TS_CP_DEFERRED_NOEXCEPT, TS_CP_STATIC_ASSERT, TS_CP_ARGUMENT_PACK_SELECT, TS_CP_TRAIT_EXPR, TS_CP_LAMBDA_EXPR, TS_CP_TEMPLATE_INFO, TS_CP_USERDEF_LITERAL, LAST_TS_CP_ENUM }; /* The resulting tree type. 
*/ union GTY((desc ("cp_tree_node_structure (&%h)"), chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)"))) lang_tree_node { union tree_node GTY ((tag ("TS_CP_GENERIC"), desc ("tree_node_structure (&%h)"))) generic; struct template_parm_index_s GTY ((tag ("TS_CP_TPI"))) tpi; struct ptrmem_cst GTY ((tag ("TS_CP_PTRMEM"))) ptrmem; struct tree_overload GTY ((tag ("TS_CP_OVERLOAD"))) overload; struct tree_baselink GTY ((tag ("TS_CP_BASELINK"))) baselink; struct tree_default_arg GTY ((tag ("TS_CP_DEFAULT_ARG"))) default_arg; struct tree_deferred_noexcept GTY ((tag ("TS_CP_DEFERRED_NOEXCEPT"))) deferred_noexcept; struct lang_identifier GTY ((tag ("TS_CP_IDENTIFIER"))) identifier; struct tree_static_assert GTY ((tag ("TS_CP_STATIC_ASSERT"))) static_assertion; struct tree_argument_pack_select GTY ((tag ("TS_CP_ARGUMENT_PACK_SELECT"))) argument_pack_select; struct tree_trait_expr GTY ((tag ("TS_CP_TRAIT_EXPR"))) trait_expression; struct tree_lambda_expr GTY ((tag ("TS_CP_LAMBDA_EXPR"))) lambda_expression; struct tree_template_info GTY ((tag ("TS_CP_TEMPLATE_INFO"))) template_info; struct tree_userdef_literal GTY ((tag ("TS_CP_USERDEF_LITERAL"))) userdef_literal; }; enum cp_tree_index { CPTI_JAVA_BYTE_TYPE, CPTI_JAVA_SHORT_TYPE, CPTI_JAVA_INT_TYPE, CPTI_JAVA_LONG_TYPE, CPTI_JAVA_FLOAT_TYPE, CPTI_JAVA_DOUBLE_TYPE, CPTI_JAVA_CHAR_TYPE, CPTI_JAVA_BOOLEAN_TYPE, CPTI_WCHAR_DECL, CPTI_VTABLE_ENTRY_TYPE, CPTI_DELTA_TYPE, CPTI_VTABLE_INDEX_TYPE, CPTI_CLEANUP_TYPE, CPTI_VTT_PARM_TYPE, CPTI_CLASS_TYPE, CPTI_UNKNOWN_TYPE, CPTI_INIT_LIST_TYPE, CPTI_DEPENDENT_LAMBDA_RETURN_TYPE, CPTI_VTBL_TYPE, CPTI_VTBL_PTR_TYPE, CPTI_STD, CPTI_ABI, CPTI_CONST_TYPE_INFO_TYPE, CPTI_TYPE_INFO_PTR_TYPE, CPTI_ABORT_FNDECL, CPTI_GLOBAL_DELETE_FNDECL, CPTI_AGGR_TAG, CPTI_CTOR_IDENTIFIER, CPTI_COMPLETE_CTOR_IDENTIFIER, CPTI_BASE_CTOR_IDENTIFIER, CPTI_DTOR_IDENTIFIER, CPTI_COMPLETE_DTOR_IDENTIFIER, CPTI_BASE_DTOR_IDENTIFIER, CPTI_DELETING_DTOR_IDENTIFIER, CPTI_DELTA_IDENTIFIER, CPTI_IN_CHARGE_IDENTIFIER, CPTI_VTT_PARM_IDENTIFIER, CPTI_NELTS_IDENTIFIER, CPTI_THIS_IDENTIFIER, CPTI_PFN_IDENTIFIER, CPTI_VPTR_IDENTIFIER, CPTI_STD_IDENTIFIER, CPTI_LANG_NAME_C, CPTI_LANG_NAME_CPLUSPLUS, CPTI_LANG_NAME_JAVA, CPTI_EMPTY_EXCEPT_SPEC, CPTI_NOEXCEPT_TRUE_SPEC, CPTI_NOEXCEPT_FALSE_SPEC, CPTI_JCLASS, CPTI_TERMINATE, CPTI_CALL_UNEXPECTED, CPTI_ATEXIT_FN_PTR_TYPE, CPTI_ATEXIT, CPTI_DSO_HANDLE, CPTI_DCAST, CPTI_KEYED_CLASSES, CPTI_NULLPTR, CPTI_NULLPTR_TYPE, CPTI_MAX }; extern GTY(()) tree cp_global_trees[CPTI_MAX]; #define java_byte_type_node cp_global_trees[CPTI_JAVA_BYTE_TYPE] #define java_short_type_node cp_global_trees[CPTI_JAVA_SHORT_TYPE] #define java_int_type_node cp_global_trees[CPTI_JAVA_INT_TYPE] #define java_long_type_node cp_global_trees[CPTI_JAVA_LONG_TYPE] #define java_float_type_node cp_global_trees[CPTI_JAVA_FLOAT_TYPE] #define java_double_type_node cp_global_trees[CPTI_JAVA_DOUBLE_TYPE] #define java_char_type_node cp_global_trees[CPTI_JAVA_CHAR_TYPE] #define java_boolean_type_node cp_global_trees[CPTI_JAVA_BOOLEAN_TYPE] #define wchar_decl_node cp_global_trees[CPTI_WCHAR_DECL] #define vtable_entry_type cp_global_trees[CPTI_VTABLE_ENTRY_TYPE] /* The type used to represent an offset by which to adjust the `this' pointer in pointer-to-member types. */ #define delta_type_node cp_global_trees[CPTI_DELTA_TYPE] /* The type used to represent an index into the vtable. 
*/ #define vtable_index_type cp_global_trees[CPTI_VTABLE_INDEX_TYPE] #define class_type_node cp_global_trees[CPTI_CLASS_TYPE] #define unknown_type_node cp_global_trees[CPTI_UNKNOWN_TYPE] #define init_list_type_node cp_global_trees[CPTI_INIT_LIST_TYPE] #define dependent_lambda_return_type_node cp_global_trees[CPTI_DEPENDENT_LAMBDA_RETURN_TYPE] #define vtbl_type_node cp_global_trees[CPTI_VTBL_TYPE] #define vtbl_ptr_type_node cp_global_trees[CPTI_VTBL_PTR_TYPE] #define std_node cp_global_trees[CPTI_STD] #define abi_node cp_global_trees[CPTI_ABI] #define const_type_info_type_node cp_global_trees[CPTI_CONST_TYPE_INFO_TYPE] #define type_info_ptr_type cp_global_trees[CPTI_TYPE_INFO_PTR_TYPE] #define abort_fndecl cp_global_trees[CPTI_ABORT_FNDECL] #define global_delete_fndecl cp_global_trees[CPTI_GLOBAL_DELETE_FNDECL] #define current_aggr cp_global_trees[CPTI_AGGR_TAG] #define nullptr_node cp_global_trees[CPTI_NULLPTR] #define nullptr_type_node cp_global_trees[CPTI_NULLPTR_TYPE] /* We cache these tree nodes so as to call get_identifier less frequently. */ /* The name of a constructor that takes an in-charge parameter to decide whether or not to construct virtual base classes. */ #define ctor_identifier cp_global_trees[CPTI_CTOR_IDENTIFIER] /* The name of a constructor that constructs virtual base classes. */ #define complete_ctor_identifier cp_global_trees[CPTI_COMPLETE_CTOR_IDENTIFIER] /* The name of a constructor that does not construct virtual base classes. */ #define base_ctor_identifier cp_global_trees[CPTI_BASE_CTOR_IDENTIFIER] /* The name of a destructor that takes an in-charge parameter to decide whether or not to destroy virtual base classes and whether or not to delete the object. */ #define dtor_identifier cp_global_trees[CPTI_DTOR_IDENTIFIER] /* The name of a destructor that destroys virtual base classes. */ #define complete_dtor_identifier cp_global_trees[CPTI_COMPLETE_DTOR_IDENTIFIER] /* The name of a destructor that does not destroy virtual base classes. */ #define base_dtor_identifier cp_global_trees[CPTI_BASE_DTOR_IDENTIFIER] /* The name of a destructor that destroys virtual base classes, and then deletes the entire object. */ #define deleting_dtor_identifier cp_global_trees[CPTI_DELETING_DTOR_IDENTIFIER] #define delta_identifier cp_global_trees[CPTI_DELTA_IDENTIFIER] #define in_charge_identifier cp_global_trees[CPTI_IN_CHARGE_IDENTIFIER] /* The name of the parameter that contains a pointer to the VTT to use for this subobject constructor or destructor. */ #define vtt_parm_identifier cp_global_trees[CPTI_VTT_PARM_IDENTIFIER] #define nelts_identifier cp_global_trees[CPTI_NELTS_IDENTIFIER] #define this_identifier cp_global_trees[CPTI_THIS_IDENTIFIER] #define pfn_identifier cp_global_trees[CPTI_PFN_IDENTIFIER] #define vptr_identifier cp_global_trees[CPTI_VPTR_IDENTIFIER] /* The name of the std namespace. */ #define std_identifier cp_global_trees[CPTI_STD_IDENTIFIER] #define lang_name_c cp_global_trees[CPTI_LANG_NAME_C] #define lang_name_cplusplus cp_global_trees[CPTI_LANG_NAME_CPLUSPLUS] #define lang_name_java cp_global_trees[CPTI_LANG_NAME_JAVA] /* Exception specifier used for throw(). */ #define empty_except_spec cp_global_trees[CPTI_EMPTY_EXCEPT_SPEC] #define noexcept_true_spec cp_global_trees[CPTI_NOEXCEPT_TRUE_SPEC] #define noexcept_false_spec cp_global_trees[CPTI_NOEXCEPT_FALSE_SPEC] /* If non-NULL, a POINTER_TYPE equivalent to (java::lang::Class*). */ #define jclass_node cp_global_trees[CPTI_JCLASS] /* The declaration for `std::terminate'. 
*/ #define terminate_node cp_global_trees[CPTI_TERMINATE] /* The declaration for "__cxa_call_unexpected". */ #define call_unexpected_node cp_global_trees[CPTI_CALL_UNEXPECTED] /* The type of the function-pointer argument to "__cxa_atexit" (or "std::atexit", if "__cxa_atexit" is not being used). */ #define atexit_fn_ptr_type_node cp_global_trees[CPTI_ATEXIT_FN_PTR_TYPE] /* A pointer to `std::atexit'. */ #define atexit_node cp_global_trees[CPTI_ATEXIT] /* A pointer to `__dso_handle'. */ #define dso_handle_node cp_global_trees[CPTI_DSO_HANDLE] /* The declaration of the dynamic_cast runtime. */ #define dynamic_cast_node cp_global_trees[CPTI_DCAST] /* The type of a destructor. */ #define cleanup_type cp_global_trees[CPTI_CLEANUP_TYPE] /* The type of the vtt parameter passed to subobject constructors and destructors. */ #define vtt_parm_type cp_global_trees[CPTI_VTT_PARM_TYPE] /* A TREE_LIST of the dynamic classes whose vtables may have to be emitted in this translation unit. */ #define keyed_classes cp_global_trees[CPTI_KEYED_CLASSES] /* Node to indicate default access. This must be distinct from the access nodes in tree.h. */ #define access_default_node null_node /* Global state. */ struct GTY(()) saved_scope { VEC(cxx_saved_binding,gc) *old_bindings; tree old_namespace; VEC(tree,gc) *decl_ns_list; tree class_name; tree class_type; tree access_specifier; tree function_decl; VEC(tree,gc) *lang_base; tree lang_name; tree template_parms; cp_binding_level *x_previous_class_level; tree x_saved_tree; /* Only used for uses of this in trailing return type. */ tree x_current_class_ptr; tree x_current_class_ref; int x_processing_template_decl; int x_processing_specialization; BOOL_BITFIELD x_processing_explicit_instantiation : 1; BOOL_BITFIELD need_pop_function_context : 1; int unevaluated_operand; int inhibit_evaluation_warnings; struct stmt_tree_s x_stmt_tree; cp_binding_level *class_bindings; cp_binding_level *bindings; struct saved_scope *prev; }; /* The current open namespace. */ #define current_namespace scope_chain->old_namespace /* The stack for namespaces of current declarations. */ #define decl_namespace_list scope_chain->decl_ns_list /* IDENTIFIER_NODE: name of current class */ #define current_class_name scope_chain->class_name /* _TYPE: the type of the current class */ #define current_class_type scope_chain->class_type /* When parsing a class definition, the access specifier most recently given by the user, or, if no access specifier was given, the default value appropriate for the kind of class (i.e., struct, class, or union). */ #define current_access_specifier scope_chain->access_specifier /* Pointer to the top of the language name stack. */ #define current_lang_base scope_chain->lang_base #define current_lang_name scope_chain->lang_name /* When parsing a template declaration, a TREE_LIST represents the active template parameters. Each node in the list represents one level of template parameters. The innermost level is first in the list. The depth of each level is stored as an INTEGER_CST in the TREE_PURPOSE of each node. The parameters for that level are stored in the TREE_VALUE. */ #define current_template_parms scope_chain->template_parms #define processing_template_decl scope_chain->x_processing_template_decl #define processing_specialization scope_chain->x_processing_specialization #define processing_explicit_instantiation scope_chain->x_processing_explicit_instantiation /* The cached class binding level, from the most recently exited class, or NULL if none. 
*/ #define previous_class_level scope_chain->x_previous_class_level /* A list of private types mentioned, for deferred access checking. */ extern GTY(()) struct saved_scope *scope_chain; struct GTY(()) cxx_int_tree_map { unsigned int uid; tree to; }; extern unsigned int cxx_int_tree_map_hash (const void *); extern int cxx_int_tree_map_eq (const void *, const void *); /* Global state pertinent to the current function. */ struct GTY(()) language_function { struct c_language_function base; tree x_cdtor_label; tree x_current_class_ptr; tree x_current_class_ref; tree x_eh_spec_block; tree x_in_charge_parm; tree x_vtt_parm; tree x_return_value; BOOL_BITFIELD returns_value : 1; BOOL_BITFIELD returns_null : 1; BOOL_BITFIELD returns_abnormally : 1; BOOL_BITFIELD x_in_function_try_handler : 1; BOOL_BITFIELD x_in_base_initializer : 1; /* True if this function can throw an exception. */ BOOL_BITFIELD can_throw : 1; htab_t GTY((param_is(struct named_label_entry))) x_named_labels; cp_binding_level *bindings; VEC(tree,gc) *x_local_names; htab_t GTY((param_is (struct cxx_int_tree_map))) extern_decl_map; }; /* The current C++-specific per-function global variables. */ #define cp_function_chain (cfun->language) /* In a constructor destructor, the point at which all derived class destroying/construction has been done. I.e., just before a constructor returns, or before any base class destroying will be done in a destructor. */ #define cdtor_label cp_function_chain->x_cdtor_label /* When we're processing a member function, current_class_ptr is the PARM_DECL for the `this' pointer. The current_class_ref is an expression for `*this'. */ #define current_class_ptr \ (*(cfun && cp_function_chain \ ? &cp_function_chain->x_current_class_ptr \ : &scope_chain->x_current_class_ptr)) #define current_class_ref \ (*(cfun && cp_function_chain \ ? &cp_function_chain->x_current_class_ref \ : &scope_chain->x_current_class_ref)) /* The EH_SPEC_BLOCK for the exception-specifiers for the current function, if any. */ #define current_eh_spec_block cp_function_chain->x_eh_spec_block /* The `__in_chrg' parameter for the current function. Only used for constructors and destructors. */ #define current_in_charge_parm cp_function_chain->x_in_charge_parm /* The `__vtt_parm' parameter for the current function. Only used for constructors and destructors. */ #define current_vtt_parm cp_function_chain->x_vtt_parm /* Set to 0 at beginning of a function definition, set to 1 if a return statement that specifies a return value is seen. */ #define current_function_returns_value cp_function_chain->returns_value /* Set to 0 at beginning of a function definition, set to 1 if a return statement with no argument is seen. */ #define current_function_returns_null cp_function_chain->returns_null /* Set to 0 at beginning of a function definition, set to 1 if a call to a noreturn function is seen. */ #define current_function_returns_abnormally \ cp_function_chain->returns_abnormally /* Nonzero if we are processing a base initializer. Zero elsewhere. */ #define in_base_initializer cp_function_chain->x_in_base_initializer #define in_function_try_handler cp_function_chain->x_in_function_try_handler /* Expression always returned from function, or error_mark_node otherwise, for use by the automatic named return value optimization. */ #define current_function_return_value \ (cp_function_chain->x_return_value) /* True if NAME is the IDENTIFIER_NODE for an overloaded "operator new" or "operator delete". 
*/ #define NEW_DELETE_OPNAME_P(NAME) \ ((NAME) == ansi_opname (NEW_EXPR) \ || (NAME) == ansi_opname (VEC_NEW_EXPR) \ || (NAME) == ansi_opname (DELETE_EXPR) \ || (NAME) == ansi_opname (VEC_DELETE_EXPR)) #define ansi_opname(CODE) \ (operator_name_info[(int) (CODE)].identifier) #define ansi_assopname(CODE) \ (assignment_operator_name_info[(int) (CODE)].identifier) /* TRUE if a tree code represents a statement. */ extern bool statement_code_p[MAX_TREE_CODES]; #define STATEMENT_CODE_P(CODE) statement_code_p[(int) (CODE)] enum languages { lang_c, lang_cplusplus, lang_java }; /* Macros to make error reporting functions' lives easier. */ #define TYPE_IDENTIFIER(NODE) (DECL_NAME (TYPE_NAME (NODE))) #define TYPE_LINKAGE_IDENTIFIER(NODE) \ (TYPE_IDENTIFIER (TYPE_MAIN_VARIANT (NODE))) #define TYPE_NAME_STRING(NODE) (IDENTIFIER_POINTER (TYPE_IDENTIFIER (NODE))) #define TYPE_NAME_LENGTH(NODE) (IDENTIFIER_LENGTH (TYPE_IDENTIFIER (NODE))) /* Nonzero if NODE has no name for linkage purposes. */ #define TYPE_ANONYMOUS_P(NODE) \ (TAGGED_TYPE_P (NODE) && ANON_AGGRNAME_P (TYPE_LINKAGE_IDENTIFIER (NODE))) /* The _DECL for this _TYPE. */ #define TYPE_MAIN_DECL(NODE) (TYPE_STUB_DECL (TYPE_MAIN_VARIANT (NODE))) /* Nonzero if T is a class (or struct or union) type. Also nonzero for template type parameters, typename types, and instantiated template template parameters. Keep these checks in ascending code order. */ #define MAYBE_CLASS_TYPE_P(T) \ (TREE_CODE (T) == TEMPLATE_TYPE_PARM \ || TREE_CODE (T) == TYPENAME_TYPE \ || TREE_CODE (T) == TYPEOF_TYPE \ || TREE_CODE (T) == BOUND_TEMPLATE_TEMPLATE_PARM \ || TREE_CODE (T) == DECLTYPE_TYPE \ || CLASS_TYPE_P (T)) /* Set CLASS_TYPE_P for T to VAL. T must be a class, struct, or union type. */ #define SET_CLASS_TYPE_P(T, VAL) \ (TYPE_LANG_FLAG_5 (T) = (VAL)) /* Nonzero if T is a class type. Zero for template type parameters, typename types, and so forth. */ #define CLASS_TYPE_P(T) \ (RECORD_OR_UNION_CODE_P (TREE_CODE (T)) && TYPE_LANG_FLAG_5 (T)) /* Nonzero if T is a class type but not an union. */ #define NON_UNION_CLASS_TYPE_P(T) \ (CLASS_TYPE_P (T) && TREE_CODE (T) != UNION_TYPE) /* Keep these checks in ascending code order. */ #define RECORD_OR_UNION_CODE_P(T) \ ((T) == RECORD_TYPE || (T) == UNION_TYPE) #define TAGGED_TYPE_P(T) \ (CLASS_TYPE_P (T) || TREE_CODE (T) == ENUMERAL_TYPE) #define IS_OVERLOAD_TYPE(T) TAGGED_TYPE_P (T) /* True if this a "Java" type, defined in 'extern "Java"'. */ #define TYPE_FOR_JAVA(NODE) TYPE_LANG_FLAG_3 (NODE) /* True if this type is dependent. This predicate is only valid if TYPE_DEPENDENT_P_VALID is true. */ #define TYPE_DEPENDENT_P(NODE) TYPE_LANG_FLAG_0 (NODE) /* True if dependent_type_p has been called for this type, with the result that TYPE_DEPENDENT_P is valid. */ #define TYPE_DEPENDENT_P_VALID(NODE) TYPE_LANG_FLAG_6(NODE) /* Nonzero if this type is const-qualified. */ #define CP_TYPE_CONST_P(NODE) \ ((cp_type_quals (NODE) & TYPE_QUAL_CONST) != 0) /* Nonzero if this type is volatile-qualified. */ #define CP_TYPE_VOLATILE_P(NODE) \ ((cp_type_quals (NODE) & TYPE_QUAL_VOLATILE) != 0) /* Nonzero if this type is restrict-qualified. */ #define CP_TYPE_RESTRICT_P(NODE) \ ((cp_type_quals (NODE) & TYPE_QUAL_RESTRICT) != 0) /* Nonzero if this type is const-qualified, but not volatile-qualified. Other qualifiers are ignored. This macro is used to test whether or not it is OK to bind an rvalue to a reference. 
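   For example, a temporary can be bound to a `const int&' but not to a
   `const volatile int&'.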
*/ #define CP_TYPE_CONST_NON_VOLATILE_P(NODE) \ ((cp_type_quals (NODE) & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)) \ == TYPE_QUAL_CONST) #define FUNCTION_ARG_CHAIN(NODE) \ TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (NODE))) /* Given a FUNCTION_DECL, returns the first TREE_LIST out of TYPE_ARG_TYPES which refers to a user-written parameter. */ #define FUNCTION_FIRST_USER_PARMTYPE(NODE) \ skip_artificial_parms_for ((NODE), TYPE_ARG_TYPES (TREE_TYPE (NODE))) /* Similarly, but for DECL_ARGUMENTS. */ #define FUNCTION_FIRST_USER_PARM(NODE) \ skip_artificial_parms_for ((NODE), DECL_ARGUMENTS (NODE)) /* Nonzero iff TYPE is derived from PARENT. Ignores accessibility and ambiguity issues. */ #define DERIVED_FROM_P(PARENT, TYPE) \ (lookup_base ((TYPE), (PARENT), ba_any, NULL) != NULL_TREE) /* Nonzero iff TYPE is uniquely derived from PARENT. Ignores accessibility. */ #define UNIQUELY_DERIVED_FROM_P(PARENT, TYPE) \ (lookup_base ((TYPE), (PARENT), ba_unique | ba_quiet, NULL) != NULL_TREE) /* Nonzero iff TYPE is publicly & uniquely derived from PARENT. */ #define PUBLICLY_UNIQUELY_DERIVED_P(PARENT, TYPE) \ (lookup_base ((TYPE), (PARENT), ba_ignore_scope | ba_check | ba_quiet, \ NULL) != NULL_TREE) /* Gives the visibility specification for a class type. */ #define CLASSTYPE_VISIBILITY(TYPE) \ DECL_VISIBILITY (TYPE_MAIN_DECL (TYPE)) #define CLASSTYPE_VISIBILITY_SPECIFIED(TYPE) \ DECL_VISIBILITY_SPECIFIED (TYPE_MAIN_DECL (TYPE)) typedef struct GTY (()) tree_pair_s { tree purpose; tree value; } tree_pair_s; typedef tree_pair_s *tree_pair_p; DEF_VEC_O (tree_pair_s); DEF_VEC_ALLOC_O (tree_pair_s,gc); /* This is a few header flags for 'struct lang_type'. Actually, all but the first are used only for lang_type_class; they are put in this structure to save space. */ struct GTY(()) lang_type_header { BOOL_BITFIELD is_lang_type_class : 1; BOOL_BITFIELD has_type_conversion : 1; BOOL_BITFIELD has_copy_ctor : 1; BOOL_BITFIELD has_default_ctor : 1; BOOL_BITFIELD const_needs_init : 1; BOOL_BITFIELD ref_needs_init : 1; BOOL_BITFIELD has_const_copy_assign : 1; BOOL_BITFIELD spare : 1; }; /* This structure provides additional information above and beyond what is provide in the ordinary tree_type. In the past, we used it for the types of class types, template parameters types, typename types, and so forth. However, there can be many (tens to hundreds of thousands) of template parameter types in a compilation, and there's no need for this additional information in that case. Therefore, we now use this data structure only for class types. In the past, it was thought that there would be relatively few class types. However, in the presence of heavy use of templates, many (i.e., thousands) of classes can easily be generated. Therefore, we should endeavor to keep the size of this structure to a minimum. 
*/ struct GTY(()) lang_type_class { struct lang_type_header h; unsigned char align; unsigned has_mutable : 1; unsigned com_interface : 1; unsigned non_pod_class : 1; unsigned nearly_empty_p : 1; unsigned user_align : 1; unsigned has_copy_assign : 1; unsigned has_new : 1; unsigned has_array_new : 1; unsigned gets_delete : 2; unsigned interface_only : 1; unsigned interface_unknown : 1; unsigned contains_empty_class_p : 1; unsigned anon_aggr : 1; unsigned non_zero_init : 1; unsigned empty_p : 1; unsigned vec_new_uses_cookie : 1; unsigned declared_class : 1; unsigned diamond_shaped : 1; unsigned repeated_base : 1; unsigned being_defined : 1; unsigned java_interface : 1; unsigned debug_requested : 1; unsigned fields_readonly : 1; unsigned use_template : 2; unsigned ptrmemfunc_flag : 1; unsigned was_anonymous : 1; unsigned lazy_default_ctor : 1; unsigned lazy_copy_ctor : 1; unsigned lazy_copy_assign : 1; unsigned lazy_destructor : 1; unsigned has_const_copy_ctor : 1; unsigned has_complex_copy_ctor : 1; unsigned has_complex_copy_assign : 1; unsigned non_aggregate : 1; unsigned has_complex_dflt : 1; unsigned has_list_ctor : 1; unsigned non_std_layout : 1; unsigned is_literal : 1; unsigned lazy_move_ctor : 1; unsigned lazy_move_assign : 1; unsigned has_complex_move_ctor : 1; unsigned has_complex_move_assign : 1; unsigned has_constexpr_ctor : 1; unsigned is_final : 1; /* When adding a flag here, consider whether or not it ought to apply to a template instance if it applies to the template. If so, make sure to copy it in instantiate_class_template! */ /* There are some bits left to fill out a 32-bit word. Keep track of this by updating the size of this bitfield whenever you add or remove a flag. */ unsigned dummy : 2; tree primary_base; VEC(tree_pair_s,gc) *vcall_indices; tree vtables; tree typeinfo_var; VEC(tree,gc) *vbases; binding_table nested_udts; tree as_base; VEC(tree,gc) *pure_virtuals; tree friend_classes; VEC(tree,gc) * GTY((reorder ("resort_type_method_vec"))) methods; tree key_method; tree decl_list; tree template_info; tree befriending_classes; /* In a RECORD_TYPE, information specific to Objective-C++, such as a list of adopted protocols or a pointer to a corresponding @interface. See objc/objc-act.h for details. */ tree objc_info; /* sorted_fields is sorted based on a pointer, so we need to be able to resort it if pointers get rearranged. */ struct sorted_fields_type * GTY ((reorder ("resort_sorted_fields"))) sorted_fields; /* FIXME reuse another field? */ tree lambda_expr; }; struct GTY(()) lang_type_ptrmem { struct lang_type_header h; tree record; }; struct GTY((variable_size)) lang_type { union lang_type_u { struct lang_type_header GTY((skip (""))) h; struct lang_type_class GTY((tag ("1"))) c; struct lang_type_ptrmem GTY((tag ("0"))) ptrmem; } GTY((desc ("%h.h.is_lang_type_class"))) u; }; #if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007) #define LANG_TYPE_CLASS_CHECK(NODE) __extension__ \ ({ struct lang_type *lt = TYPE_LANG_SPECIFIC (NODE); \ if (! 
lt->u.h.is_lang_type_class) \ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ &lt->u.c; }) #define LANG_TYPE_PTRMEM_CHECK(NODE) __extension__ \ ({ struct lang_type *lt = TYPE_LANG_SPECIFIC (NODE); \ if (lt->u.h.is_lang_type_class) \ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ &lt->u.ptrmem; }) #else #define LANG_TYPE_CLASS_CHECK(NODE) (&TYPE_LANG_SPECIFIC (NODE)->u.c) #define LANG_TYPE_PTRMEM_CHECK(NODE) (&TYPE_LANG_SPECIFIC (NODE)->u.ptrmem) #endif /* ENABLE_TREE_CHECKING */ /* Nonzero for _CLASSTYPE means that operator delete is defined. */ #define TYPE_GETS_DELETE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->gets_delete) #define TYPE_GETS_REG_DELETE(NODE) (TYPE_GETS_DELETE (NODE) & 1) /* Nonzero if `new NODE[x]' should cause the allocation of extra storage to indicate how many array elements are in use. */ #define TYPE_VEC_NEW_USES_COOKIE(NODE) \ (CLASS_TYPE_P (NODE) \ && LANG_TYPE_CLASS_CHECK (NODE)->vec_new_uses_cookie) /* Nonzero means that this _CLASSTYPE node defines ways of converting itself to other types. */ #define TYPE_HAS_CONVERSION(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->h.has_type_conversion) /* Nonzero means that NODE (a class type) has a default constructor -- but that it has not yet been declared. */ #define CLASSTYPE_LAZY_DEFAULT_CTOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_default_ctor) /* Nonzero means that NODE (a class type) has a copy constructor -- but that it has not yet been declared. */ #define CLASSTYPE_LAZY_COPY_CTOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_ctor) /* Nonzero means that NODE (a class type) has a move constructor -- but that it has not yet been declared. */ #define CLASSTYPE_LAZY_MOVE_CTOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_ctor) /* Nonzero means that NODE (a class type) has an assignment operator -- but that it has not yet been declared. */ #define CLASSTYPE_LAZY_COPY_ASSIGN(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_assign) /* Nonzero means that NODE (a class type) has an assignment operator -- but that it has not yet been declared. */ #define CLASSTYPE_LAZY_MOVE_ASSIGN(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_assign) /* Nonzero means that NODE (a class type) has a destructor -- but that it has not yet been declared. */ #define CLASSTYPE_LAZY_DESTRUCTOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_destructor) /* Nonzero means that NODE (a class type) is final */ #define CLASSTYPE_FINAL(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->is_final) /* Nonzero means that this _CLASSTYPE node overloads operator=(X&). */ #define TYPE_HAS_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_copy_assign) /* True iff the class type NODE has an "operator =" whose parameter has a parameter of type "const X&". */ #define TYPE_HAS_CONST_COPY_ASSIGN(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->h.has_const_copy_assign) /* Nonzero means that this _CLASSTYPE node has an X(X&) constructor. */ #define TYPE_HAS_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->h.has_copy_ctor) #define TYPE_HAS_CONST_COPY_CTOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->has_const_copy_ctor) /* Nonzero if this class has an X(initializer_list<T>) constructor. */ #define TYPE_HAS_LIST_CTOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->has_list_ctor) /* Nonzero if this class has a constexpr constructor other than a copy/move constructor. Note that a class can have constexpr constructors for static initialization even if it isn't a literal class. 
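   For example, `struct M { constexpr M() {} ~M(); };' is not a literal type
   because of its non-trivial destructor, yet a namespace-scope `M m;' can
   still be constant-initialized through the constexpr constructor.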
*/ #define TYPE_HAS_CONSTEXPR_CTOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->has_constexpr_ctor) /* Nonzero if this class defines an overloaded operator new. (An operator new [] doesn't count.) */ #define TYPE_HAS_NEW_OPERATOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->has_new) /* Nonzero if this class defines an overloaded operator new[]. */ #define TYPE_HAS_ARRAY_NEW_OPERATOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->has_array_new) /* Nonzero means that this type is being defined. I.e., the left brace starting the definition of this type has been seen. */ #define TYPE_BEING_DEFINED(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->being_defined) /* Nonzero means that this type is either complete or being defined, so we can do lookup in it. */ #define COMPLETE_OR_OPEN_TYPE_P(NODE) \ (COMPLETE_TYPE_P (NODE) || (CLASS_TYPE_P (NODE) && TYPE_BEING_DEFINED (NODE))) /* Mark bits for repeated base checks. */ #define TYPE_MARKED_P(NODE) TREE_LANG_FLAG_6 (TYPE_CHECK (NODE)) /* Nonzero if the class NODE has multiple paths to the same (virtual) base object. */ #define CLASSTYPE_DIAMOND_SHAPED_P(NODE) \ (LANG_TYPE_CLASS_CHECK(NODE)->diamond_shaped) /* Nonzero if the class NODE has multiple instances of the same base type. */ #define CLASSTYPE_REPEATED_BASE_P(NODE) \ (LANG_TYPE_CLASS_CHECK(NODE)->repeated_base) /* The member function with which the vtable will be emitted: the first noninline non-pure-virtual member function. NULL_TREE if there is no key function or if this is a class template */ #define CLASSTYPE_KEY_METHOD(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->key_method) /* Vector member functions defined in this class. Each element is either a FUNCTION_DECL, a TEMPLATE_DECL, or an OVERLOAD. All functions with the same name end up in the same slot. The first two elements are for constructors, and destructors, respectively. All template conversion operators to innermost template dependent types are overloaded on the next slot, if they exist. Note, the names for these functions will not all be the same. The non-template conversion operators & templated conversions to non-innermost template types are next, followed by ordinary member functions. There may be empty entries at the end of the vector. The conversion operators are unsorted. The ordinary member functions are sorted, once the class is complete. */ #define CLASSTYPE_METHOD_VEC(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->methods) /* For class templates, this is a TREE_LIST of all member data, functions, types, and friends in the order of declaration. The TREE_PURPOSE of each TREE_LIST is NULL_TREE for a friend, and the RECORD_TYPE for the class template otherwise. */ #define CLASSTYPE_DECL_LIST(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->decl_list) /* The slot in the CLASSTYPE_METHOD_VEC where constructors go. */ #define CLASSTYPE_CONSTRUCTOR_SLOT 0 /* The slot in the CLASSTYPE_METHOD_VEC where destructors go. */ #define CLASSTYPE_DESTRUCTOR_SLOT 1 /* The first slot in the CLASSTYPE_METHOD_VEC where conversion operators can appear. */ #define CLASSTYPE_FIRST_CONVERSION_SLOT 2 /* A FUNCTION_DECL or OVERLOAD for the constructors for NODE. These are the constructors that take an in-charge parameter. */ #define CLASSTYPE_CONSTRUCTORS(NODE) \ (VEC_index (tree, CLASSTYPE_METHOD_VEC (NODE), CLASSTYPE_CONSTRUCTOR_SLOT)) /* A FUNCTION_DECL for the destructor for NODE. These are the destructors that take an in-charge parameter. If CLASSTYPE_LAZY_DESTRUCTOR is true, then this entry will be NULL until the destructor is created with lazily_declare_fn. 
*/ #define CLASSTYPE_DESTRUCTORS(NODE) \ (CLASSTYPE_METHOD_VEC (NODE) \ ? VEC_index (tree, CLASSTYPE_METHOD_VEC (NODE), CLASSTYPE_DESTRUCTOR_SLOT) \ : NULL_TREE) /* A dictionary of the nested user-defined-types (class-types, or enums) found within this class. This table includes nested member class templates. */ #define CLASSTYPE_NESTED_UTDS(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->nested_udts) /* Nonzero if NODE has a primary base class, i.e., a base class with which it shares the virtual function table pointer. */ #define CLASSTYPE_HAS_PRIMARY_BASE_P(NODE) \ (CLASSTYPE_PRIMARY_BINFO (NODE) != NULL_TREE) /* If non-NULL, this is the binfo for the primary base class, i.e., the base class which contains the virtual function table pointer for this class. */ #define CLASSTYPE_PRIMARY_BINFO(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->primary_base) /* A vector of BINFOs for the direct and indirect virtual base classes that this type uses in a post-order depth-first left-to-right order. (In other words, these bases appear in the order that they should be initialized.) */ #define CLASSTYPE_VBASECLASSES(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->vbases) /* The type corresponding to NODE when NODE is used as a base class, i.e., NODE without virtual base classes. */ #define CLASSTYPE_AS_BASE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->as_base) /* True iff NODE is the CLASSTYPE_AS_BASE version of some type. */ #define IS_FAKE_BASE_TYPE(NODE) \ (TREE_CODE (NODE) == RECORD_TYPE \ && TYPE_CONTEXT (NODE) && CLASS_TYPE_P (TYPE_CONTEXT (NODE)) \ && CLASSTYPE_AS_BASE (TYPE_CONTEXT (NODE)) == (NODE)) /* These are the size and alignment of the type without its virtual base classes, for when we use this type as a base itself. */ #define CLASSTYPE_SIZE(NODE) TYPE_SIZE (CLASSTYPE_AS_BASE (NODE)) #define CLASSTYPE_SIZE_UNIT(NODE) TYPE_SIZE_UNIT (CLASSTYPE_AS_BASE (NODE)) #define CLASSTYPE_ALIGN(NODE) TYPE_ALIGN (CLASSTYPE_AS_BASE (NODE)) #define CLASSTYPE_USER_ALIGN(NODE) TYPE_USER_ALIGN (CLASSTYPE_AS_BASE (NODE)) /* The alignment of NODE, without its virtual bases, in bytes. */ #define CLASSTYPE_ALIGN_UNIT(NODE) \ (CLASSTYPE_ALIGN (NODE) / BITS_PER_UNIT) /* True if this a Java interface type, declared with '__attribute__ ((java_interface))'. */ #define TYPE_JAVA_INTERFACE(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->java_interface) /* A VEC(tree) of virtual functions which cannot be inherited by derived classes. When deriving from this type, the derived class must provide its own definition for each of these functions. */ #define CLASSTYPE_PURE_VIRTUALS(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->pure_virtuals) /* Nonzero means that this type has an X() constructor. */ #define TYPE_HAS_DEFAULT_CONSTRUCTOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->h.has_default_ctor) /* Nonzero means that this type contains a mutable member. */ #define CLASSTYPE_HAS_MUTABLE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_mutable) #define TYPE_HAS_MUTABLE_P(NODE) (cp_has_mutable_p (NODE)) /* Nonzero means that this class type is not POD for the purpose of layout (as defined in the ABI). This is different from the language's POD. */ #define CLASSTYPE_NON_LAYOUT_POD_P(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->non_pod_class) /* Nonzero means that this class type is a non-standard-layout class. */ #define CLASSTYPE_NON_STD_LAYOUT(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->non_std_layout) /* Nonzero means that this class contains pod types whose default initialization is not a zero initialization (namely, pointers to data members). 
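   (Under the Itanium C++ ABI, for instance, a null pointer to data member is
   represented by the offset -1 rather than 0, so zeroing the storage would
   not produce a null member pointer.)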
*/ #define CLASSTYPE_NON_ZERO_INIT_P(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->non_zero_init) /* Nonzero if this class is "empty" in the sense of the C++ ABI. */ #define CLASSTYPE_EMPTY_P(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->empty_p) /* Nonzero if this class is "nearly empty", i.e., contains only a virtual function table pointer. */ #define CLASSTYPE_NEARLY_EMPTY_P(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->nearly_empty_p) /* Nonzero if this class contains an empty subobject. */ #define CLASSTYPE_CONTAINS_EMPTY_CLASS_P(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->contains_empty_class_p) /* A list of class types of which this type is a friend. The TREE_VALUE is normally a TYPE, but will be a TEMPLATE_DECL in the case of a template friend. */ #define CLASSTYPE_FRIEND_CLASSES(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->friend_classes) /* A list of the classes which grant friendship to this class. */ #define CLASSTYPE_BEFRIENDING_CLASSES(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->befriending_classes) /* The associated LAMBDA_EXPR that made this class. */ #define CLASSTYPE_LAMBDA_EXPR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->lambda_expr) /* The extra mangling scope for this closure type. */ #define LAMBDA_TYPE_EXTRA_SCOPE(NODE) \ (LAMBDA_EXPR_EXTRA_SCOPE (CLASSTYPE_LAMBDA_EXPR (NODE))) /* Say whether this node was declared as a "class" or a "struct". */ #define CLASSTYPE_DECLARED_CLASS(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->declared_class) /* Nonzero if this class has const members which have no specified initialization. */ #define CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE) \ (TYPE_LANG_SPECIFIC (NODE) \ ? LANG_TYPE_CLASS_CHECK (NODE)->h.const_needs_init : 0) #define SET_CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE, VALUE) \ (LANG_TYPE_CLASS_CHECK (NODE)->h.const_needs_init = (VALUE)) /* Nonzero if this class has ref members which have no specified initialization. */ #define CLASSTYPE_REF_FIELDS_NEED_INIT(NODE) \ (TYPE_LANG_SPECIFIC (NODE) \ ? LANG_TYPE_CLASS_CHECK (NODE)->h.ref_needs_init : 0) #define SET_CLASSTYPE_REF_FIELDS_NEED_INIT(NODE, VALUE) \ (LANG_TYPE_CLASS_CHECK (NODE)->h.ref_needs_init = (VALUE)) /* Nonzero if this class is included from a header file which employs `#pragma interface', and it is not included in its implementation file. */ #define CLASSTYPE_INTERFACE_ONLY(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->interface_only) /* True if we have already determined whether or not vtables, VTTs, typeinfo, and other similar per-class data should be emitted in this translation unit. This flag does not indicate whether or not these items should be emitted; it only indicates that we know one way or the other. */ #define CLASSTYPE_INTERFACE_KNOWN(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown == 0) /* The opposite of CLASSTYPE_INTERFACE_KNOWN. */ #define CLASSTYPE_INTERFACE_UNKNOWN(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown) #define SET_CLASSTYPE_INTERFACE_UNKNOWN_X(NODE,X) \ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = !!(X)) #define SET_CLASSTYPE_INTERFACE_UNKNOWN(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 1) #define SET_CLASSTYPE_INTERFACE_KNOWN(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 0) /* Nonzero if a _DECL node requires us to output debug info for this class. */ #define CLASSTYPE_DEBUG_REQUESTED(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->debug_requested) /* Additional macros for inheritance information. */ /* Nonzero means that this class is on a path leading to a new vtable. 
*/ #define BINFO_VTABLE_PATH_MARKED(NODE) BINFO_FLAG_1 (NODE) /* Nonzero means B (a BINFO) has its own vtable. Any copies will not have this flag set. */ #define BINFO_NEW_VTABLE_MARKED(B) (BINFO_FLAG_2 (B)) /* Compare a BINFO_TYPE with another type for equality. For a binfo, this is functionally equivalent to using same_type_p, but measurably faster. At least one of the arguments must be a BINFO_TYPE. The other can be a BINFO_TYPE or a regular type. If BINFO_TYPE(T) ever stops being the main variant of the class the binfo is for, this macro must change. */ #define SAME_BINFO_TYPE_P(A, B) ((A) == (B)) /* Any subobject that needs a new vtable must have a vptr and must not be a non-virtual primary base (since it would then use the vtable from a derived class and never become non-primary.) */ #define SET_BINFO_NEW_VTABLE_MARKED(B) \ (BINFO_NEW_VTABLE_MARKED (B) = 1, \ gcc_assert (!BINFO_PRIMARY_P (B) || BINFO_VIRTUAL_P (B)), \ gcc_assert (TYPE_VFIELD (BINFO_TYPE (B)))) /* Nonzero if this binfo is for a dependent base - one that should not be searched. */ #define BINFO_DEPENDENT_BASE_P(NODE) BINFO_FLAG_3 (NODE) /* Nonzero if this binfo has lost its primary base binfo (because that is a nearly-empty virtual base that has been taken by some other base in the complete hierarchy. */ #define BINFO_LOST_PRIMARY_P(NODE) BINFO_FLAG_4 (NODE) /* Nonzero if this BINFO is a primary base class. */ #define BINFO_PRIMARY_P(NODE) BINFO_FLAG_5(NODE) /* Used by various search routines. */ #define IDENTIFIER_MARKED(NODE) TREE_LANG_FLAG_0 (NODE) /* A VEC(tree_pair_s) of the vcall indices associated with the class NODE. The PURPOSE of each element is a FUNCTION_DECL for a virtual function. The VALUE is the index into the virtual table where the vcall offset for that function is stored, when NODE is a virtual base. */ #define CLASSTYPE_VCALL_INDICES(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->vcall_indices) /* The various vtables for the class NODE. The primary vtable will be first, followed by the construction vtables and VTT, if any. */ #define CLASSTYPE_VTABLES(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->vtables) /* The std::type_info variable representing this class, or NULL if no such variable has been created. This field is only set for the TYPE_MAIN_VARIANT of the class. */ #define CLASSTYPE_TYPEINFO_VAR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->typeinfo_var) /* Accessor macros for the BINFO_VIRTUALS list. */ /* The number of bytes by which to adjust the `this' pointer when calling this virtual function. Subtract this value from the this pointer. Always non-NULL, might be constant zero though. */ #define BV_DELTA(NODE) (TREE_PURPOSE (NODE)) /* If non-NULL, the vtable index at which to find the vcall offset when calling this virtual function. Add the value at that vtable index to the this pointer. */ #define BV_VCALL_INDEX(NODE) (TREE_TYPE (NODE)) /* The function to call. */ #define BV_FN(NODE) (TREE_VALUE (NODE)) /* Whether or not this entry is for a lost primary virtual base. */ #define BV_LOST_PRIMARY(NODE) (TREE_LANG_FLAG_0 (NODE)) /* For FUNCTION_TYPE or METHOD_TYPE, a list of the exceptions that this type can raise. Each TREE_VALUE is a _TYPE. The TREE_VALUE will be NULL_TREE to indicate a throw specification of `()', or no exceptions allowed. For a noexcept specification, TREE_VALUE is NULL_TREE and TREE_PURPOSE is the constant-expression. For a deferred noexcept-specification, TREE_PURPOSE is a DEFERRED_NOEXCEPT (for templates) or an OVERLOAD list of functions (for implicitly declared functions). 
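   For example (an illustrative restatement of the rules above):

     void f () throw (int, long);   two-element TREE_LIST whose TREE_VALUEs
                                    are the types int and long
     void g () throw ();            a single element with TREE_VALUE NULL_TREE
     void h () noexcept (E);        TREE_PURPOSE is the expression E,
                                    TREE_VALUE is NULL_TREE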
*/ #define TYPE_RAISES_EXCEPTIONS(NODE) TYPE_LANG_SLOT_1 (NODE) /* For FUNCTION_TYPE or METHOD_TYPE, return 1 iff it is declared `throw()' or noexcept(true). */ #define TYPE_NOTHROW_P(NODE) nothrow_spec_p (TYPE_RAISES_EXCEPTIONS (NODE)) /* For FUNCTION_TYPE or METHOD_TYPE, true if NODE is noexcept. This is the case for things declared noexcept(true) and, with -fnothrow-opt, for throw() functions. */ #define TYPE_NOEXCEPT_P(NODE) type_noexcept_p (NODE) /* The binding level associated with the namespace. */ #define NAMESPACE_LEVEL(NODE) \ (LANG_DECL_NS_CHECK (NODE)->level) /* Flags shared by all forms of DECL_LANG_SPECIFIC. Some of the flags live here only to make lang_decl_min/fn smaller. Do not make this struct larger than 32 bits; instead, make sel smaller. */ struct GTY(()) lang_decl_base { unsigned selector : 16; /* Larger than necessary for faster access. */ ENUM_BITFIELD(languages) language : 4; unsigned use_template : 2; unsigned not_really_extern : 1; /* var or fn */ unsigned initialized_in_class : 1; /* var or fn */ unsigned repo_available_p : 1; /* var or fn */ unsigned threadprivate_or_deleted_p : 1; /* var or fn */ unsigned anticipated_p : 1; /* fn or type */ unsigned friend_attr : 1; /* fn or type */ unsigned template_conv_p : 1; /* var or template */ unsigned odr_used : 1; /* var or fn */ unsigned u2sel : 1; /* 1 spare bit */ }; /* True for DECL codes which have template info and access. */ #define LANG_DECL_HAS_MIN(NODE) \ (TREE_CODE (NODE) == FUNCTION_DECL \ || TREE_CODE (NODE) == FIELD_DECL \ || TREE_CODE (NODE) == VAR_DECL \ || TREE_CODE (NODE) == CONST_DECL \ || TREE_CODE (NODE) == TYPE_DECL \ || TREE_CODE (NODE) == TEMPLATE_DECL \ || TREE_CODE (NODE) == USING_DECL) /* DECL_LANG_SPECIFIC for the above codes. */ struct GTY(()) lang_decl_min { struct lang_decl_base base; /* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is THUNK_ALIAS. In a FUNCTION_DECL for which DECL_THUNK_P does not hold, VAR_DECL, TYPE_DECL, or TEMPLATE_DECL, this is DECL_TEMPLATE_INFO. */ tree template_info; union lang_decl_u2 { /* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is THUNK_VIRTUAL_OFFSET. Otherwise this is DECL_ACCESS. */ tree GTY ((tag ("0"))) access; /* For VAR_DECL in function, this is DECL_DISCRIMINATOR. */ int GTY ((tag ("1"))) discriminator; } GTY ((desc ("%0.u.base.u2sel"))) u2; }; /* Additional DECL_LANG_SPECIFIC information for functions. */ struct GTY(()) lang_decl_fn { struct lang_decl_min min; /* In an overloaded operator, this is the value of DECL_OVERLOADED_OPERATOR_P. */ ENUM_BITFIELD (tree_code) operator_code : 16; unsigned global_ctor_p : 1; unsigned global_dtor_p : 1; unsigned constructor_attr : 1; unsigned destructor_attr : 1; unsigned assignment_operator_p : 1; unsigned static_function : 1; unsigned pure_virtual : 1; unsigned defaulted_p : 1; unsigned has_in_charge_parm_p : 1; unsigned has_vtt_parm_p : 1; unsigned pending_inline_p : 1; unsigned nonconverting : 1; unsigned thunk_p : 1; unsigned this_thunk_p : 1; unsigned hidden_friend_p : 1; unsigned suppress_implicit_decl : 1; /* For a non-thunk function decl, this is a tree list of friendly classes. For a thunk function decl, it is the thunked to function decl. */ tree befriending_classes; /* For a non-virtual FUNCTION_DECL, this is DECL_FRIEND_CONTEXT. For a virtual FUNCTION_DECL for which DECL_THIS_THUNK_P does not hold, this is DECL_THUNKS. Both this pointer and result pointer adjusting thunks are chained here. This pointer thunks to return pointer thunks will be chained on the return pointer thunk. 
*/ tree context; union lang_decl_u5 { /* In a non-thunk FUNCTION_DECL or TEMPLATE_DECL, this is DECL_CLONED_FUNCTION. */ tree GTY ((tag ("0"))) cloned_function; /* In a FUNCTION_DECL for which THUNK_P holds this is the THUNK_FIXED_OFFSET. */ HOST_WIDE_INT GTY ((tag ("1"))) fixed_offset; } GTY ((desc ("%1.thunk_p"))) u5; union lang_decl_u3 { struct cp_token_cache * GTY ((tag ("1"))) pending_inline_info; struct language_function * GTY ((tag ("0"))) saved_language_function; } GTY ((desc ("%1.pending_inline_p"))) u; }; /* DECL_LANG_SPECIFIC for namespaces. */ struct GTY(()) lang_decl_ns { struct lang_decl_base base; cp_binding_level *level; }; /* DECL_LANG_SPECIFIC for parameters. */ struct GTY(()) lang_decl_parm { struct lang_decl_base base; int level; int index; }; /* DECL_LANG_SPECIFIC for all types. It would be nice to just make this a union rather than a struct containing a union as its only field, but tree.h declares it as a struct. */ struct GTY((variable_size)) lang_decl { union GTY((desc ("%h.base.selector"))) lang_decl_u { struct lang_decl_base GTY ((default)) base; struct lang_decl_min GTY((tag ("0"))) min; struct lang_decl_fn GTY ((tag ("1"))) fn; struct lang_decl_ns GTY((tag ("2"))) ns; struct lang_decl_parm GTY((tag ("3"))) parm; } u; }; /* Looks through a template (if present) to find what it declares. */ #define STRIP_TEMPLATE(NODE) \ (TREE_CODE (NODE) == TEMPLATE_DECL ? DECL_TEMPLATE_RESULT (NODE) : NODE) #if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007) #define LANG_DECL_MIN_CHECK(NODE) __extension__ \ ({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \ if (!LANG_DECL_HAS_MIN (NODE)) \ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ &lt->u.min; }) /* We want to be able to check DECL_CONSTRUCTOR_P and such on a function template, not just on a FUNCTION_DECL. So when looking for things in lang_decl_fn, look down through a TEMPLATE_DECL into its result. */ #define LANG_DECL_FN_CHECK(NODE) __extension__ \ ({ struct lang_decl *lt = DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE)); \ if (!DECL_DECLARES_FUNCTION_P (NODE) || lt->u.base.selector != 1) \ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ &lt->u.fn; }) #define LANG_DECL_NS_CHECK(NODE) __extension__ \ ({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \ if (TREE_CODE (NODE) != NAMESPACE_DECL || lt->u.base.selector != 2) \ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ &lt->u.ns; }) #define LANG_DECL_PARM_CHECK(NODE) __extension__ \ ({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \ if (TREE_CODE (NODE) != PARM_DECL) \ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ &lt->u.parm; }) #define LANG_DECL_U2_CHECK(NODE, TF) __extension__ \ ({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \ if (!LANG_DECL_HAS_MIN (NODE) || lt->u.base.u2sel != TF) \ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ &lt->u.min.u2; }) #else #define LANG_DECL_MIN_CHECK(NODE) \ (&DECL_LANG_SPECIFIC (NODE)->u.min) #define LANG_DECL_FN_CHECK(NODE) \ (&DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE))->u.fn) #define LANG_DECL_NS_CHECK(NODE) \ (&DECL_LANG_SPECIFIC (NODE)->u.ns) #define LANG_DECL_PARM_CHECK(NODE) \ (&DECL_LANG_SPECIFIC (NODE)->u.parm) #define LANG_DECL_U2_CHECK(NODE, TF) \ (&DECL_LANG_SPECIFIC (NODE)->u.min.u2) #endif /* ENABLE_TREE_CHECKING */ /* For a FUNCTION_DECL or a VAR_DECL, the language linkage for the declaration. Some entities (like a member function in a local class, or a local variable) do not have linkage at all, and this macro should not be used in those cases. 
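   For example (illustrative only):

     extern "C" void c_fn ();   DECL_LANGUAGE (c_fn) is lang_c
     void cxx_fn ();            DECL_LANGUAGE (cxx_fn) is lang_cplusplus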
Implementation note: A FUNCTION_DECL without DECL_LANG_SPECIFIC was created by language-independent code, and has C linkage. Most VAR_DECLs have C++ linkage, and do not have DECL_LANG_SPECIFIC, but we do create DECL_LANG_SPECIFIC for variables with non-C++ linkage. */ #define DECL_LANGUAGE(NODE) \ (DECL_LANG_SPECIFIC (NODE) \ ? DECL_LANG_SPECIFIC (NODE)->u.base.language \ : (TREE_CODE (NODE) == FUNCTION_DECL \ ? lang_c : lang_cplusplus)) /* Set the language linkage for NODE to LANGUAGE. */ #define SET_DECL_LANGUAGE(NODE, LANGUAGE) \ (DECL_LANG_SPECIFIC (NODE)->u.base.language = (LANGUAGE)) /* For FUNCTION_DECLs: nonzero means that this function is a constructor. */ #define DECL_CONSTRUCTOR_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->constructor_attr) /* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a complete object. */ #define DECL_COMPLETE_CONSTRUCTOR_P(NODE) \ (DECL_CONSTRUCTOR_P (NODE) \ && DECL_NAME (NODE) == complete_ctor_identifier) /* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a base object. */ #define DECL_BASE_CONSTRUCTOR_P(NODE) \ (DECL_CONSTRUCTOR_P (NODE) \ && DECL_NAME (NODE) == base_ctor_identifier) /* Nonzero if NODE (a FUNCTION_DECL) is a constructor, but neither the specialized in-charge constructor nor the specialized not-in-charge constructor. */ #define DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P(NODE) \ (DECL_DECLARES_FUNCTION_P (NODE) && DECL_CONSTRUCTOR_P (NODE) \ && !DECL_CLONED_FUNCTION_P (NODE)) /* Nonzero if NODE (a FUNCTION_DECL) is a copy constructor. */ #define DECL_COPY_CONSTRUCTOR_P(NODE) \ (DECL_CONSTRUCTOR_P (NODE) && copy_fn_p (NODE) > 0) /* Nonzero if NODE (a FUNCTION_DECL) is a move constructor. */ #define DECL_MOVE_CONSTRUCTOR_P(NODE) \ (DECL_CONSTRUCTOR_P (NODE) && move_fn_p (NODE)) /* Nonzero if NODE is a destructor. */ #define DECL_DESTRUCTOR_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->destructor_attr) /* Nonzero if NODE (a FUNCTION_DECL) is a destructor, but not the specialized in-charge (complete object) destructor, the in-charge deleting destructor, or the base destructor. */ #define DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P(NODE) \ (DECL_DECLARES_FUNCTION_P (NODE) && DECL_DESTRUCTOR_P (NODE) \ && !DECL_CLONED_FUNCTION_P (NODE)) /* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete object. */ #define DECL_COMPLETE_DESTRUCTOR_P(NODE) \ (DECL_DESTRUCTOR_P (NODE) \ && DECL_NAME (NODE) == complete_dtor_identifier) /* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a base object. */ #define DECL_BASE_DESTRUCTOR_P(NODE) \ (DECL_DESTRUCTOR_P (NODE) \ && DECL_NAME (NODE) == base_dtor_identifier) /* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete object that deletes the object after it has been destroyed. */ #define DECL_DELETING_DESTRUCTOR_P(NODE) \ (DECL_DESTRUCTOR_P (NODE) \ && DECL_NAME (NODE) == deleting_dtor_identifier) /* Nonzero if NODE (a FUNCTION_DECL) is a cloned constructor or destructor. */ #define DECL_CLONED_FUNCTION_P(NODE) (!!decl_cloned_function_p (NODE, true)) /* If DECL_CLONED_FUNCTION_P holds, this is the function that was cloned. */ #define DECL_CLONED_FUNCTION(NODE) (*decl_cloned_function_p (NODE, false)) /* Perform an action for each clone of FN, if FN is a function with clones. This macro should be used like: FOR_EACH_CLONE (clone, fn) { ...
} */ #define FOR_EACH_CLONE(CLONE, FN) \ if (TREE_CODE (FN) == FUNCTION_DECL \ && (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (FN) \ || DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P (FN))) \ for (CLONE = DECL_CHAIN (FN); \ CLONE && DECL_CLONED_FUNCTION_P (CLONE); \ CLONE = DECL_CHAIN (CLONE)) /* Nonzero if NODE has DECL_DISCRIMINATOR and not DECL_ACCESS. */ #define DECL_DISCRIMINATOR_P(NODE) \ (TREE_CODE (NODE) == VAR_DECL \ && DECL_FUNCTION_SCOPE_P (NODE)) /* Discriminator for name mangling. */ #define DECL_DISCRIMINATOR(NODE) (LANG_DECL_U2_CHECK (NODE, 1)->discriminator) /* True iff DECL_DISCRIMINATOR is set for a DECL_DISCRIMINATOR_P decl. */ #define DECL_DISCRIMINATOR_SET_P(NODE) \ (DECL_LANG_SPECIFIC (NODE) && DECL_LANG_SPECIFIC (NODE)->u.base.u2sel == 1) /* The index of a user-declared parameter in its function, starting at 1. All artificial parameters will have index 0. */ #define DECL_PARM_INDEX(NODE) \ (LANG_DECL_PARM_CHECK (NODE)->index) /* The level of a user-declared parameter in its function, starting at 1. A parameter of the function will have level 1; a parameter of the first nested function declarator (i.e. t in void f (void (*p)(T t))) will have level 2. */ #define DECL_PARM_LEVEL(NODE) \ (LANG_DECL_PARM_CHECK (NODE)->level) /* Nonzero if the VTT parm has been added to NODE. */ #define DECL_HAS_VTT_PARM_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->has_vtt_parm_p) /* Nonzero if NODE is a FUNCTION_DECL for which a VTT parameter is required. */ #define DECL_NEEDS_VTT_PARM_P(NODE) \ (CLASSTYPE_VBASECLASSES (DECL_CONTEXT (NODE)) \ && (DECL_BASE_CONSTRUCTOR_P (NODE) \ || DECL_BASE_DESTRUCTOR_P (NODE))) /* Nonzero if NODE is a user-defined conversion operator. */ #define DECL_CONV_FN_P(NODE) \ (DECL_NAME (NODE) && IDENTIFIER_TYPENAME_P (DECL_NAME (NODE))) /* If FN is a conversion operator, the type to which it converts. Otherwise, NULL_TREE. */ #define DECL_CONV_FN_TYPE(FN) \ (DECL_CONV_FN_P (FN) ? TREE_TYPE (DECL_NAME (FN)) : NULL_TREE) /* Nonzero if NODE, which is a TEMPLATE_DECL, is a template conversion operator to a type dependent on the innermost template args. */ #define DECL_TEMPLATE_CONV_FN_P(NODE) \ (DECL_LANG_SPECIFIC (TEMPLATE_DECL_CHECK (NODE))->u.base.template_conv_p) /* Nonzero if NODE, a static data member, was declared in its class as an array of unknown bound. */ #define VAR_HAD_UNKNOWN_BOUND(NODE) \ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \ ? DECL_LANG_SPECIFIC (NODE)->u.base.template_conv_p \ : false) #define SET_VAR_HAD_UNKNOWN_BOUND(NODE) \ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.template_conv_p = true) /* Set the overloaded operator code for NODE to CODE. */ #define SET_OVERLOADED_OPERATOR_CODE(NODE, CODE) \ (LANG_DECL_FN_CHECK (NODE)->operator_code = (CODE)) /* If NODE is an overloaded operator, then this returns the TREE_CODE associated with the overloaded operator. DECL_ASSIGNMENT_OPERATOR_P must also be checked to determine whether or not NODE is an assignment operator. If NODE is not an overloaded operator, ERROR_MARK is returned. Since the numerical value of ERROR_MARK is zero, this macro can be used as a predicate to test whether or not NODE is an overloaded operator. */ #define DECL_OVERLOADED_OPERATOR_P(NODE) \ (IDENTIFIER_OPNAME_P (DECL_NAME (NODE)) \ ? LANG_DECL_FN_CHECK (NODE)->operator_code : ERROR_MARK) /* Nonzero if NODE is an assignment operator (including += and such). 
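   As an illustrative sketch (not part of the original comment), a plain
   copy or move assignment operator is typically recognized by checking
   both properties together:

     DECL_ASSIGNMENT_OPERATOR_P (fn)
       && DECL_OVERLOADED_OPERATOR_P (fn) == NOP_EXPR

   whereas operator+= and the other compound assignments carry some other
   operator_code.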
*/ #define DECL_ASSIGNMENT_OPERATOR_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->assignment_operator_p) /* For FUNCTION_DECLs: nonzero means that this function is a constructor or a destructor with an extra in-charge parameter to control whether or not virtual bases are constructed. */ #define DECL_HAS_IN_CHARGE_PARM_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->has_in_charge_parm_p) /* Nonzero if DECL is a declaration of __builtin_constant_p. */ #define DECL_IS_BUILTIN_CONSTANT_P(NODE) \ (TREE_CODE (NODE) == FUNCTION_DECL \ && DECL_BUILT_IN_CLASS (NODE) == BUILT_IN_NORMAL \ && DECL_FUNCTION_CODE (NODE) == BUILT_IN_CONSTANT_P) /* Nonzero for _DECL means that this decl appears in (or will appear in) as a member in a RECORD_TYPE or UNION_TYPE node. It is also for detecting circularity in case members are multiply defined. In the case of a VAR_DECL, it is also used to determine how program storage should be allocated. */ #define DECL_IN_AGGR_P(NODE) (DECL_LANG_FLAG_3 (NODE)) /* Nonzero for a VAR_DECL means that the variable's initialization (if any) has been processed. (In general, DECL_INITIALIZED_P is !DECL_EXTERN, but static data members may be initialized even if not defined.) */ #define DECL_INITIALIZED_P(NODE) \ (TREE_LANG_FLAG_1 (VAR_DECL_CHECK (NODE))) /* Nonzero for a VAR_DECL iff an explicit initializer was provided. */ #define DECL_NONTRIVIALLY_INITIALIZED_P(NODE) \ (TREE_LANG_FLAG_3 (VAR_DECL_CHECK (NODE))) /* Nonzero for a VAR_DECL that was initialized with a constant-expression. */ #define DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P(NODE) \ (TREE_LANG_FLAG_2 (VAR_DECL_CHECK (NODE))) /* Nonzero if the DECL was initialized in the class definition itself, rather than outside the class. This is used for both static member VAR_DECLS, and FUNCTION_DECLS that are defined in the class. */ #define DECL_INITIALIZED_IN_CLASS_P(DECL) \ (DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \ ->u.base.initialized_in_class) /* Nonzero if the DECL is used in the sense of 3.2 [basic.def.odr]. Only available for decls with DECL_LANG_SPECIFIC. */ #define DECL_ODR_USED(DECL) \ (DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \ ->u.base.odr_used) /* Nonzero for DECL means that this decl is just a friend declaration, and should not be added to the list of members for this class. */ #define DECL_FRIEND_P(NODE) (DECL_LANG_SPECIFIC (NODE)->u.base.friend_attr) /* A TREE_LIST of the types which have befriended this FUNCTION_DECL. */ #define DECL_BEFRIENDING_CLASSES(NODE) \ (LANG_DECL_FN_CHECK (NODE)->befriending_classes) /* Nonzero for FUNCTION_DECL means that this decl is a static member function. */ #define DECL_STATIC_FUNCTION_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->static_function) /* Nonzero for FUNCTION_DECL means that this decl is a non-static member function. */ #define DECL_NONSTATIC_MEMBER_FUNCTION_P(NODE) \ (TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE) /* Nonzero for FUNCTION_DECL means that this decl is a member function (static or non-static). */ #define DECL_FUNCTION_MEMBER_P(NODE) \ (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) || DECL_STATIC_FUNCTION_P (NODE)) /* Nonzero for FUNCTION_DECL means that this member function has `this' as const X *const. */ #define DECL_CONST_MEMFUNC_P(NODE) \ (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \ && CP_TYPE_CONST_P (TREE_TYPE (TREE_VALUE \ (TYPE_ARG_TYPES (TREE_TYPE (NODE)))))) /* Nonzero for FUNCTION_DECL means that this member function has `this' as volatile X *const. 
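   For example (illustrative only), given

     struct X { void f () volatile; };

   the type of `this' within X::f is volatile X *const, so this predicate
   holds for X::f.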
*/ #define DECL_VOLATILE_MEMFUNC_P(NODE) \ (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \ && CP_TYPE_VOLATILE_P (TREE_TYPE (TREE_VALUE \ (TYPE_ARG_TYPES (TREE_TYPE (NODE)))))) /* Nonzero for a DECL means that this member is a non-static member. */ #define DECL_NONSTATIC_MEMBER_P(NODE) \ (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \ || TREE_CODE (NODE) == FIELD_DECL) /* Nonzero for _DECL means that this member object type is mutable. */ #define DECL_MUTABLE_P(NODE) (DECL_LANG_FLAG_0 (NODE)) /* Nonzero for _DECL means that this constructor or conversion function is non-converting. */ #define DECL_NONCONVERTING_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->nonconverting) /* Nonzero for FUNCTION_DECL means that this member function is a pure virtual function. */ #define DECL_PURE_VIRTUAL_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->pure_virtual) /* True (in a FUNCTION_DECL) if NODE is a virtual function that is an invalid overrider for a function from a base class. Once we have complained about an invalid overrider we avoid complaining about it again. */ #define DECL_INVALID_OVERRIDER_P(NODE) \ (DECL_LANG_FLAG_4 (NODE)) /* True (in a FUNCTION_DECL) if NODE is a function declared with an override virt-specifier */ #define DECL_OVERRIDE_P(NODE) (TREE_LANG_FLAG_0 (NODE)) /* True (in a FUNCTION_DECL) if NODE is a function declared with a final virt-specifier */ #define DECL_FINAL_P(NODE) (TREE_LANG_FLAG_1 (NODE)) /* The thunks associated with NODE, a FUNCTION_DECL. */ #define DECL_THUNKS(NODE) \ (LANG_DECL_FN_CHECK (NODE)->context) /* Nonzero if NODE is a thunk, rather than an ordinary function. */ #define DECL_THUNK_P(NODE) \ (TREE_CODE (NODE) == FUNCTION_DECL \ && DECL_LANG_SPECIFIC (NODE) \ && LANG_DECL_FN_CHECK (NODE)->thunk_p) /* Set DECL_THUNK_P for node. */ #define SET_DECL_THUNK_P(NODE, THIS_ADJUSTING) \ (LANG_DECL_FN_CHECK (NODE)->thunk_p = 1, \ LANG_DECL_FN_CHECK (NODE)->this_thunk_p = (THIS_ADJUSTING)) /* Nonzero if NODE is a this pointer adjusting thunk. */ #define DECL_THIS_THUNK_P(NODE) \ (DECL_THUNK_P (NODE) && LANG_DECL_FN_CHECK (NODE)->this_thunk_p) /* Nonzero if NODE is a result pointer adjusting thunk. */ #define DECL_RESULT_THUNK_P(NODE) \ (DECL_THUNK_P (NODE) && !LANG_DECL_FN_CHECK (NODE)->this_thunk_p) /* Nonzero if NODE is a FUNCTION_DECL, but not a thunk. */ #define DECL_NON_THUNK_FUNCTION_P(NODE) \ (TREE_CODE (NODE) == FUNCTION_DECL && !DECL_THUNK_P (NODE)) /* Nonzero if NODE is `extern "C"'. */ #define DECL_EXTERN_C_P(NODE) \ (DECL_LANGUAGE (NODE) == lang_c) /* Nonzero if NODE is an `extern "C"' function. */ #define DECL_EXTERN_C_FUNCTION_P(NODE) \ (DECL_NON_THUNK_FUNCTION_P (NODE) && DECL_EXTERN_C_P (NODE)) /* True iff DECL is an entity with vague linkage whose definition is available in this translation unit. */ #define DECL_REPO_AVAILABLE_P(NODE) \ (DECL_LANG_SPECIFIC (NODE)->u.base.repo_available_p) /* True if DECL is declared 'constexpr'. */ #define DECL_DECLARED_CONSTEXPR_P(DECL) \ DECL_LANG_FLAG_8 (VAR_OR_FUNCTION_DECL_CHECK (STRIP_TEMPLATE (DECL))) /* Nonzero if this DECL is the __PRETTY_FUNCTION__ variable in a template function. */ #define DECL_PRETTY_FUNCTION_P(NODE) \ (TREE_LANG_FLAG_0 (VAR_DECL_CHECK (NODE))) /* The _TYPE context in which this _DECL appears. This field holds the class where a virtual function instance is actually defined. */ #define DECL_CLASS_CONTEXT(NODE) \ (DECL_CLASS_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : NULL_TREE) /* For a non-member friend function, the class (if any) in which this friend was defined. 
For example, given: struct S { friend void f (); }; the DECL_FRIEND_CONTEXT for `f' will be `S'. */ #define DECL_FRIEND_CONTEXT(NODE) \ ((DECL_DECLARES_FUNCTION_P (NODE) \ && DECL_FRIEND_P (NODE) && !DECL_FUNCTION_MEMBER_P (NODE)) \ ? LANG_DECL_FN_CHECK (NODE)->context \ : NULL_TREE) /* Set the DECL_FRIEND_CONTEXT for NODE to CONTEXT. */ #define SET_DECL_FRIEND_CONTEXT(NODE, CONTEXT) \ (LANG_DECL_FN_CHECK (NODE)->context = (CONTEXT)) #define CP_DECL_CONTEXT(NODE) \ (!DECL_FILE_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : global_namespace) #define CP_TYPE_CONTEXT(NODE) \ (!TYPE_FILE_SCOPE_P (NODE) ? TYPE_CONTEXT (NODE) : global_namespace) #define FROB_CONTEXT(NODE) \ ((NODE) == global_namespace ? DECL_CONTEXT (NODE) : (NODE)) /* 1 iff NODE has namespace scope, including the global namespace. */ #define DECL_NAMESPACE_SCOPE_P(NODE) \ (!DECL_TEMPLATE_PARM_P (NODE) \ && TREE_CODE (CP_DECL_CONTEXT (NODE)) == NAMESPACE_DECL) #define TYPE_NAMESPACE_SCOPE_P(NODE) \ (TREE_CODE (CP_TYPE_CONTEXT (NODE)) == NAMESPACE_DECL) #define NAMESPACE_SCOPE_P(NODE) \ ((DECL_P (NODE) && DECL_NAMESPACE_SCOPE_P (NODE)) \ || (TYPE_P (NODE) && TYPE_NAMESPACE_SCOPE_P (NODE))) /* 1 iff NODE is a class member. */ #define DECL_CLASS_SCOPE_P(NODE) \ (DECL_CONTEXT (NODE) && TYPE_P (DECL_CONTEXT (NODE))) #define TYPE_CLASS_SCOPE_P(NODE) \ (TYPE_CONTEXT (NODE) && TYPE_P (TYPE_CONTEXT (NODE))) /* 1 iff NODE is function-local. */ #define DECL_FUNCTION_SCOPE_P(NODE) \ (DECL_CONTEXT (NODE) \ && TREE_CODE (DECL_CONTEXT (NODE)) == FUNCTION_DECL) #define TYPE_FUNCTION_SCOPE_P(NODE) \ (TYPE_CONTEXT (NODE) && TREE_CODE (TYPE_CONTEXT (NODE)) == FUNCTION_DECL) /* 1 iff VAR_DECL node NODE is a type-info decl. This flag is set for both the primary typeinfo object and the associated NTBS name. */ #define DECL_TINFO_P(NODE) TREE_LANG_FLAG_4 (VAR_DECL_CHECK (NODE)) /* 1 iff VAR_DECL node NODE is virtual table or VTT. */ #define DECL_VTABLE_OR_VTT_P(NODE) TREE_LANG_FLAG_5 (VAR_DECL_CHECK (NODE)) /* Returns 1 iff VAR_DECL is a construction virtual table. DECL_VTABLE_OR_VTT_P will be true in this case and must be checked before using this macro. */ #define DECL_CONSTRUCTION_VTABLE_P(NODE) \ TREE_LANG_FLAG_6 (VAR_DECL_CHECK (NODE)) /* 1 iff NODE is function-local, but for types. */ #define LOCAL_CLASS_P(NODE) \ (decl_function_context (TYPE_MAIN_DECL (NODE)) != NULL_TREE) /* For a NAMESPACE_DECL: the list of using namespace directives The PURPOSE is the used namespace, the value is the namespace that is the common ancestor. */ #define DECL_NAMESPACE_USING(NODE) DECL_VINDEX (NAMESPACE_DECL_CHECK (NODE)) /* In a NAMESPACE_DECL, the DECL_INITIAL is used to record all users of a namespace, to record the transitive closure of using namespace. */ #define DECL_NAMESPACE_USERS(NODE) DECL_INITIAL (NAMESPACE_DECL_CHECK (NODE)) /* In a NAMESPACE_DECL, the list of namespaces which have associated themselves with this one. */ #define DECL_NAMESPACE_ASSOCIATIONS(NODE) \ (NAMESPACE_DECL_CHECK (NODE)->decl_non_common.saved_tree) /* In a NAMESPACE_DECL, points to the original namespace if this is a namespace alias. */ #define DECL_NAMESPACE_ALIAS(NODE) \ DECL_ABSTRACT_ORIGIN (NAMESPACE_DECL_CHECK (NODE)) #define ORIGINAL_NAMESPACE(NODE) \ (DECL_NAMESPACE_ALIAS (NODE) ? DECL_NAMESPACE_ALIAS (NODE) : (NODE)) /* Nonzero if NODE is the std namespace. 
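   Only a namespace std declared directly in the global namespace
   qualifies; for example (illustrative only), a nested

     namespace N { namespace std { } }

   has CP_DECL_CONTEXT N rather than the global namespace and so does not
   satisfy this predicate.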
*/ #define DECL_NAMESPACE_STD_P(NODE) \ (TREE_CODE (NODE) == NAMESPACE_DECL \ && CP_DECL_CONTEXT (NODE) == global_namespace \ && DECL_NAME (NODE) == std_identifier) /* In a TREE_LIST concatenating using directives, indicate indirect directives */ #define TREE_INDIRECT_USING(NODE) (TREE_LIST_CHECK (NODE)->base.lang_flag_0) /* In a TREE_LIST in an attribute list, indicates that the attribute must be applied at instantiation time. */ #define ATTR_IS_DEPENDENT(NODE) (TREE_LIST_CHECK (NODE)->base.lang_flag_0) extern tree decl_shadowed_for_var_lookup (tree); extern void decl_shadowed_for_var_insert (tree, tree); /* Non zero if this is a using decl for a dependent scope. */ #define DECL_DEPENDENT_P(NODE) DECL_LANG_FLAG_0 (USING_DECL_CHECK (NODE)) /* The scope named in a using decl. */ #define USING_DECL_SCOPE(NODE) TREE_TYPE (USING_DECL_CHECK (NODE)) /* The decls named by a using decl. */ #define USING_DECL_DECLS(NODE) DECL_INITIAL (USING_DECL_CHECK (NODE)) /* Non zero if the using decl refers to a dependent type. */ #define USING_DECL_TYPENAME_P(NODE) DECL_LANG_FLAG_1 (USING_DECL_CHECK (NODE)) /* In a VAR_DECL, true if we have a shadowed local variable in the shadowed var table for this VAR_DECL. */ #define DECL_HAS_SHADOWED_FOR_VAR_P(NODE) \ (VAR_DECL_CHECK (NODE)->decl_with_vis.shadowed_for_var_p) /* In a VAR_DECL for a variable declared in a for statement, this is the shadowed (local) variable. */ #define DECL_SHADOWED_FOR_VAR(NODE) \ (DECL_HAS_SHADOWED_FOR_VAR_P(NODE) ? decl_shadowed_for_var_lookup (NODE) : NULL) #define SET_DECL_SHADOWED_FOR_VAR(NODE, VAL) \ (decl_shadowed_for_var_insert (NODE, VAL)) /* In a FUNCTION_DECL, this is nonzero if this function was defined in the class definition. We have saved away the text of the function, but have not yet processed it. */ #define DECL_PENDING_INLINE_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->pending_inline_p) /* If DECL_PENDING_INLINE_P holds, this is the saved text of the function. */ #define DECL_PENDING_INLINE_INFO(NODE) \ (LANG_DECL_FN_CHECK (NODE)->u.pending_inline_info) /* Nonzero for TYPE_DECL means that it was written 'using name = type'. */ #define TYPE_DECL_ALIAS_P(NODE) \ DECL_LANG_FLAG_6 (TYPE_DECL_CHECK (NODE)) /* Nonzero for a type which is an alias for another type; i.e, a type which declaration was written 'using name-of-type = another-type'. */ #define TYPE_ALIAS_P(NODE) \ (TYPE_P (NODE) \ && TYPE_NAME (NODE) \ && TREE_CODE (TYPE_NAME (NODE)) == TYPE_DECL \ && TYPE_DECL_ALIAS_P (TYPE_NAME (NODE))) /* For a class type: if this structure has many fields, we'll sort them and put them into a TREE_VEC. */ #define CLASSTYPE_SORTED_FIELDS(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->sorted_fields) /* If non-NULL for a VAR_DECL, FUNCTION_DECL, TYPE_DECL or TEMPLATE_DECL, the entity is either a template specialization (if DECL_USE_TEMPLATE is nonzero) or the abstract instance of the template itself. In either case, DECL_TEMPLATE_INFO is a TREE_LIST, whose TREE_PURPOSE is the TEMPLATE_DECL of which this entity is a specialization or abstract instance. The TREE_VALUE is the template arguments used to specialize the template. Consider: template <typename T> struct S { friend void f(T) {} }; In this case, S<int>::f is, from the point of view of the compiler, an instantiation of a template -- but, from the point of view of the language, each instantiation of S results in a wholly unrelated global function f. In this case, DECL_TEMPLATE_INFO for S<int>::f will be non-NULL, but DECL_USE_TEMPLATE will be zero. 
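   As an illustration (not part of the original comment): for the
   instantiation S<int> of the class template S above, TI_TEMPLATE of
   CLASSTYPE_TEMPLATE_INFO (S<int>) is the TEMPLATE_DECL for S and
   TI_ARGS is the argument vector {int}; the TI_* accessors are defined
   below.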
*/ #define DECL_TEMPLATE_INFO(NODE) \ (DECL_LANG_SPECIFIC (VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK (NODE)) \ ->u.min.template_info) /* For a VAR_DECL, indicates that the variable is actually a non-static data member of anonymous union that has been promoted to variable status. */ #define DECL_ANON_UNION_VAR_P(NODE) \ (DECL_LANG_FLAG_4 (VAR_DECL_CHECK (NODE))) /* Template information for a RECORD_TYPE or UNION_TYPE. */ #define CLASSTYPE_TEMPLATE_INFO(NODE) \ (LANG_TYPE_CLASS_CHECK (RECORD_OR_UNION_CHECK (NODE))->template_info) /* Template information for an ENUMERAL_TYPE. Although an enumeration may not be a primary template, it may be declared within the scope of a primary template and the enumeration constants may depend on non-type template parameters. */ #define ENUM_TEMPLATE_INFO(NODE) \ (TYPE_LANG_SLOT_1 (ENUMERAL_TYPE_CHECK (NODE))) /* Template information for a template template parameter. */ #define TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO(NODE) \ (LANG_TYPE_CLASS_CHECK (BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK (NODE)) \ ->template_info) /* Template information for an ENUMERAL_, RECORD_, UNION_TYPE, or BOUND_TEMPLATE_TEMPLATE_PARM type. Note that if NODE is a specialization of an alias template, this accessor returns the template info for the alias template, not the one (if any) for the template of the underlying type. */ #define TYPE_TEMPLATE_INFO(NODE) \ (TYPE_ALIAS_P (NODE) \ ? ((TYPE_NAME (NODE) && DECL_LANG_SPECIFIC (TYPE_NAME (NODE))) \ ? DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) \ : NULL_TREE) \ : ((TREE_CODE (NODE) == ENUMERAL_TYPE) \ ? ENUM_TEMPLATE_INFO (NODE) \ : ((TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM) \ ? TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO (NODE) \ : (CLASS_TYPE_P (NODE) \ ? CLASSTYPE_TEMPLATE_INFO (NODE) \ : NULL_TREE)))) /* Set the template information for an ENUMERAL_, RECORD_, or UNION_TYPE to VAL. */ #define SET_TYPE_TEMPLATE_INFO(NODE, VAL) \ (TREE_CODE (NODE) == ENUMERAL_TYPE \ ? (ENUM_TEMPLATE_INFO (NODE) = (VAL)) \ : ((CLASS_TYPE_P (NODE) && !TYPE_ALIAS_P (NODE)) \ ? (CLASSTYPE_TEMPLATE_INFO (NODE) = (VAL)) \ : (DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) = (VAL)))) #define TI_TEMPLATE(NODE) TREE_TYPE (TEMPLATE_INFO_CHECK (NODE)) #define TI_ARGS(NODE) TREE_CHAIN (TEMPLATE_INFO_CHECK (NODE)) #define TI_PENDING_TEMPLATE_FLAG(NODE) TREE_LANG_FLAG_1 (NODE) /* For a given TREE_VEC containing a template argument list, this property contains the number of arguments that are not defaulted. */ #define NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) TREE_CHAIN (TREE_VEC_CHECK (NODE)) /* Below are the setter and getter of the NON_DEFAULT_TEMPLATE_ARGS_COUNT property. */ #define SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE, INT_VALUE) \ NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) = build_int_cst (NULL_TREE, INT_VALUE) #ifdef ENABLE_CHECKING #define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \ int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE)) #else #define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \ NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE) \ ? int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE)) \ : TREE_VEC_LENGTH (INNERMOST_TEMPLATE_ARGS (NODE)) #endif /* The list of typedefs - used in the template - that need access checking at template instantiation time. */ #define TI_TYPEDEFS_NEEDING_ACCESS_CHECKING(NODE) \ ((struct tree_template_info*)TEMPLATE_INFO_CHECK \ (NODE))->typedefs_needing_access_checking /* We use TREE_VECs to hold template arguments. If there is only one level of template arguments, then the TREE_VEC contains the arguments directly. 
If there is more than one level of template arguments, then each entry in the TREE_VEC is itself a TREE_VEC, containing the template arguments for a single level. The first entry in the outer TREE_VEC is the outermost level of template parameters; the last is the innermost. It is incorrect to ever form a template argument vector containing only one level of arguments, but which is a TREE_VEC containing as its only entry the TREE_VEC for that level. For each TREE_VEC containing the template arguments for a single level, it's possible to get or set the number of non defaulted template arguments by using the accessor macros GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT or SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT. */ /* Nonzero if the template arguments is actually a vector of vectors, rather than just a vector. */ #define TMPL_ARGS_HAVE_MULTIPLE_LEVELS(NODE) \ (NODE && TREE_VEC_LENGTH (NODE) && TREE_VEC_ELT (NODE, 0) \ && TREE_CODE (TREE_VEC_ELT (NODE, 0)) == TREE_VEC) /* The depth of a template argument vector. When called directly by the parser, we use a TREE_LIST rather than a TREE_VEC to represent template arguments. In fact, we may even see NULL_TREE if there are no template arguments. In both of those cases, there is only one level of template arguments. */ #define TMPL_ARGS_DEPTH(NODE) \ (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (NODE) ? TREE_VEC_LENGTH (NODE) : 1) /* The LEVELth level of the template ARGS. The outermost level of args is level 1, not level 0. */ #define TMPL_ARGS_LEVEL(ARGS, LEVEL) \ (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (ARGS) \ ? TREE_VEC_ELT (ARGS, (LEVEL) - 1) : (ARGS)) /* Set the LEVELth level of the template ARGS to VAL. This macro does not work with single-level argument vectors. */ #define SET_TMPL_ARGS_LEVEL(ARGS, LEVEL, VAL) \ (TREE_VEC_ELT (ARGS, (LEVEL) - 1) = (VAL)) /* Accesses the IDXth parameter in the LEVELth level of the ARGS. */ #define TMPL_ARG(ARGS, LEVEL, IDX) \ (TREE_VEC_ELT (TMPL_ARGS_LEVEL (ARGS, LEVEL), IDX)) /* Given a single level of template arguments in NODE, return the number of arguments. */ #define NUM_TMPL_ARGS(NODE) \ (TREE_VEC_LENGTH (NODE)) /* Returns the innermost level of template arguments in ARGS. */ #define INNERMOST_TEMPLATE_ARGS(NODE) \ (get_innermost_template_args ((NODE), 1)) /* The number of levels of template parameters given by NODE. */ #define TMPL_PARMS_DEPTH(NODE) \ ((HOST_WIDE_INT) TREE_INT_CST_LOW (TREE_PURPOSE (NODE))) /* The TEMPLATE_DECL instantiated or specialized by NODE. This TEMPLATE_DECL will be the immediate parent, not the most general template. For example, in: template <class T> struct S { template <class U> void f(U); } the FUNCTION_DECL for S<int>::f<double> will have, as its DECL_TI_TEMPLATE, `template <class U> S<int>::f<U>'. As a special case, for a member friend template of a template class, this value will not be a TEMPLATE_DECL, but rather an IDENTIFIER_NODE or OVERLOAD indicating the name of the template and any explicit template arguments provided. For example, in: template <class T> struct S { friend void f<int>(int, double); } the DECL_TI_TEMPLATE will be an IDENTIFIER_NODE for `f' and the DECL_TI_ARGS will be {int}. For a FIELD_DECL with a non-static data member initializer, this value is the FIELD_DECL it was instantiated from. */ #define DECL_TI_TEMPLATE(NODE) TI_TEMPLATE (DECL_TEMPLATE_INFO (NODE)) /* The template arguments used to obtain this decl from the most general form of DECL_TI_TEMPLATE. For the example given for DECL_TI_TEMPLATE, the DECL_TI_ARGS will be {int, double}. 
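   As an illustration in terms of the accessors above (not part of the
   original comment): for S<int>::f<double> that argument vector has
   TMPL_ARGS_DEPTH 2; TMPL_ARGS_LEVEL (args, 1) is {int}, the level for
   S, and TMPL_ARG (args, 2, 0) is double, the innermost argument.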
These are always the full set of arguments required to instantiate this declaration from the most general template specialized here. */ #define DECL_TI_ARGS(NODE) TI_ARGS (DECL_TEMPLATE_INFO (NODE)) /* The TEMPLATE_DECL associated with NODE, a class type. Even if NODE will be generated from a partial specialization, the TEMPLATE_DECL referred to here will be the original template. For example, given: template <typename T> struct S {}; template <typename T> struct S<T*> {}; the CLASSTYPE_TI_TEMPLATE for S<int*> will be S, not S<T*>. */ #define CLASSTYPE_TI_TEMPLATE(NODE) TI_TEMPLATE (CLASSTYPE_TEMPLATE_INFO (NODE)) #define CLASSTYPE_TI_ARGS(NODE) TI_ARGS (CLASSTYPE_TEMPLATE_INFO (NODE)) /* For a template instantiation TYPE, returns the TYPE corresponding to the primary template. Otherwise returns TYPE itself. */ #define CLASSTYPE_PRIMARY_TEMPLATE_TYPE(TYPE) \ ((CLASSTYPE_USE_TEMPLATE ((TYPE)) \ && !CLASSTYPE_TEMPLATE_SPECIALIZATION ((TYPE))) \ ? TREE_TYPE (DECL_TEMPLATE_RESULT (DECL_PRIMARY_TEMPLATE \ (CLASSTYPE_TI_TEMPLATE ((TYPE))))) \ : (TYPE)) /* Like CLASSTYPE_TI_TEMPLATE, but also works for ENUMERAL_TYPEs. */ #define TYPE_TI_TEMPLATE(NODE) \ (TI_TEMPLATE (TYPE_TEMPLATE_INFO (NODE))) /* Like DECL_TI_ARGS, but for an ENUMERAL_, RECORD_, or UNION_TYPE. */ #define TYPE_TI_ARGS(NODE) \ (TI_ARGS (TYPE_TEMPLATE_INFO (NODE))) #define INNERMOST_TEMPLATE_PARMS(NODE) TREE_VALUE (NODE) /* Nonzero if NODE (a TEMPLATE_DECL) is a member template, in the sense of [temp.mem]. */ #define DECL_MEMBER_TEMPLATE_P(NODE) \ (DECL_LANG_FLAG_1 (TEMPLATE_DECL_CHECK (NODE))) /* Nonzero if the NODE corresponds to the template parameters for a member template, whose inline definition is being processed after the class definition is complete. */ #define TEMPLATE_PARMS_FOR_INLINE(NODE) TREE_LANG_FLAG_1 (NODE) /* Determine if a parameter (i.e., a PARM_DECL) is a function parameter pack. */ #define FUNCTION_PARAMETER_PACK_P(NODE) \ (DECL_LANG_FLAG_1 (PARM_DECL_CHECK (NODE))) /* Determines if NODE is an expansion of one or more parameter packs, e.g., a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */ #define PACK_EXPANSION_P(NODE) \ (TREE_CODE (NODE) == TYPE_PACK_EXPANSION \ || TREE_CODE (NODE) == EXPR_PACK_EXPANSION) /* Extracts the type or expression pattern from a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */ #define PACK_EXPANSION_PATTERN(NODE) \ (TREE_CODE (NODE) == TYPE_PACK_EXPANSION? TREE_TYPE (NODE) \ : TREE_OPERAND (NODE, 0)) /* Sets the type or expression pattern for a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */ #define SET_PACK_EXPANSION_PATTERN(NODE,VALUE) \ if (TREE_CODE (NODE) == TYPE_PACK_EXPANSION) \ TREE_TYPE (NODE) = VALUE; \ else \ TREE_OPERAND (NODE, 0) = VALUE /* The list of parameter packs used in the PACK_EXPANSION_* node. The TREE_VALUE of each TREE_LIST contains the parameter packs. */ #define PACK_EXPANSION_PARAMETER_PACKS(NODE) \ *(TREE_CODE (NODE) == EXPR_PACK_EXPANSION \ ? &TREE_OPERAND (NODE, 1) \ : &TYPE_MINVAL (TYPE_PACK_EXPANSION_CHECK (NODE))) /* Any additional template args to be applied when substituting into the pattern, set by tsubst_pack_expansion for partial instantiations. */ #define PACK_EXPANSION_EXTRA_ARGS(NODE) \ *(TREE_CODE (NODE) == TYPE_PACK_EXPANSION \ ? &TYPE_MAXVAL (NODE) \ : &TREE_OPERAND ((NODE), 2)) /* True iff this pack expansion is within a function context. */ #define PACK_EXPANSION_LOCAL_P(NODE) TREE_LANG_FLAG_0 (NODE) /* Determine if this is an argument pack.
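   For example (illustrative only), given

     template <typename... Ts> struct tuple { };

   the argument bound to Ts in tuple<int, float> is represented as a
   TYPE_ARGUMENT_PACK whose ARGUMENT_PACK_ARGS is the TREE_VEC
   {int, float}.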
*/ #define ARGUMENT_PACK_P(NODE) \ (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK \ || TREE_CODE (NODE) == NONTYPE_ARGUMENT_PACK) /* The arguments stored in an argument pack. Arguments are stored in a TREE_VEC, which may have length zero. */ #define ARGUMENT_PACK_ARGS(NODE) \ (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK? TREE_TYPE (NODE) \ : TREE_OPERAND (NODE, 0)) /* Set the arguments stored in an argument pack. VALUE must be a TREE_VEC. */ #define SET_ARGUMENT_PACK_ARGS(NODE,VALUE) \ if (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK) \ TREE_TYPE (NODE) = VALUE; \ else \ TREE_OPERAND (NODE, 0) = VALUE /* Whether the argument pack is "incomplete", meaning that more arguments can still be deduced. Incomplete argument packs are only used when the user has provided an explicit template argument list for a variadic function template. Some of the explicit template arguments will be placed into the beginning of the argument pack, but additional arguments might still be deduced. */ #define ARGUMENT_PACK_INCOMPLETE_P(NODE) \ TREE_LANG_FLAG_0 (ARGUMENT_PACK_ARGS (NODE)) /* When ARGUMENT_PACK_INCOMPLETE_P, stores the explicit template arguments used to fill this pack. */ #define ARGUMENT_PACK_EXPLICIT_ARGS(NODE) \ TREE_TYPE (ARGUMENT_PACK_ARGS (NODE)) /* In an ARGUMENT_PACK_SELECT, the argument pack from which an argument will be selected. */ #define ARGUMENT_PACK_SELECT_FROM_PACK(NODE) \ (((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->argument_pack) /* In an ARGUMENT_PACK_SELECT, the index of the argument we want to select. */ #define ARGUMENT_PACK_SELECT_INDEX(NODE) \ (((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->index) /* In an ARGUMENT_PACK_SELECT, the actual underlying argument that the ARGUMENT_PACK_SELECT represents. */ #define ARGUMENT_PACK_SELECT_ARG(NODE) \ TREE_VEC_ELT (ARGUMENT_PACK_ARGS (ARGUMENT_PACK_SELECT_FROM_PACK (NODE)), \ ARGUMENT_PACK_SELECT_INDEX (NODE)); /* In a FUNCTION_DECL, the saved language-specific per-function data. */ #define DECL_SAVED_FUNCTION_DATA(NODE) \ (LANG_DECL_FN_CHECK (FUNCTION_DECL_CHECK (NODE)) \ ->u.saved_language_function) /* True if NODE is an implicit INDIRECT_EXPR from convert_from_reference. */ #define REFERENCE_REF_P(NODE) \ (TREE_CODE (NODE) == INDIRECT_REF \ && TREE_TYPE (TREE_OPERAND (NODE, 0)) \ && (TREE_CODE (TREE_TYPE (TREE_OPERAND ((NODE), 0))) \ == REFERENCE_TYPE)) #define NEW_EXPR_USE_GLOBAL(NODE) \ TREE_LANG_FLAG_0 (NEW_EXPR_CHECK (NODE)) #define DELETE_EXPR_USE_GLOBAL(NODE) \ TREE_LANG_FLAG_0 (DELETE_EXPR_CHECK (NODE)) #define DELETE_EXPR_USE_VEC(NODE) \ TREE_LANG_FLAG_1 (DELETE_EXPR_CHECK (NODE)) /* Indicates that this is a non-dependent COMPOUND_EXPR which will resolve to a function call. */ #define COMPOUND_EXPR_OVERLOADED(NODE) \ TREE_LANG_FLAG_0 (COMPOUND_EXPR_CHECK (NODE)) /* In a CALL_EXPR appearing in a template, true if Koenig lookup should be performed at instantiation time. */ #define KOENIG_LOOKUP_P(NODE) TREE_LANG_FLAG_0 (CALL_EXPR_CHECK (NODE)) /* Indicates whether a string literal has been parenthesized. Such usages are disallowed in certain circumstances. */ #define PAREN_STRING_LITERAL_P(NODE) \ TREE_LANG_FLAG_0 (STRING_CST_CHECK (NODE)) /* Nonzero if this AGGR_INIT_EXPR provides for initialization via a constructor call, rather than an ordinary function call. */ #define AGGR_INIT_VIA_CTOR_P(NODE) \ TREE_LANG_FLAG_0 (AGGR_INIT_EXPR_CHECK (NODE)) /* Nonzero if expanding this AGGR_INIT_EXPR should first zero-initialize the object. 
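   One situation where this arises (an illustrative sketch, not an
   exhaustive description) is value-initialization of an object whose
   default constructor is non-trivial but not user-provided: the storage
   is zeroed first and the constructor is run afterwards.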
*/ #define AGGR_INIT_ZERO_FIRST(NODE) \ TREE_LANG_FLAG_2 (AGGR_INIT_EXPR_CHECK (NODE)) /* AGGR_INIT_EXPR accessors. These are equivalent to the CALL_EXPR accessors, except for AGGR_INIT_EXPR_SLOT (which takes the place of CALL_EXPR_STATIC_CHAIN). */ #define AGGR_INIT_EXPR_FN(NODE) TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 1) #define AGGR_INIT_EXPR_SLOT(NODE) \ TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 2) #define AGGR_INIT_EXPR_ARG(NODE, I) \ TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), (I) + 3) #define aggr_init_expr_nargs(NODE) (VL_EXP_OPERAND_LENGTH(NODE) - 3) /* AGGR_INIT_EXPR_ARGP returns a pointer to the argument vector for NODE. We can't use &AGGR_INIT_EXPR_ARG (NODE, 0) because that will complain if the argument count is zero when checking is enabled. Instead, do the pointer arithmetic to advance past the 3 fixed operands in a AGGR_INIT_EXPR. That produces a valid pointer to just past the end of the operand array, even if it's not valid to dereference it. */ #define AGGR_INIT_EXPR_ARGP(NODE) \ (&(TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 0)) + 3) /* Abstract iterators for AGGR_INIT_EXPRs. */ /* Structure containing iterator state. */ typedef struct aggr_init_expr_arg_iterator_d { tree t; /* the aggr_init_expr */ int n; /* argument count */ int i; /* next argument index */ } aggr_init_expr_arg_iterator; /* Initialize the abstract argument list iterator object ITER with the arguments from AGGR_INIT_EXPR node EXP. */ static inline void init_aggr_init_expr_arg_iterator (tree exp, aggr_init_expr_arg_iterator *iter) { iter->t = exp; iter->n = aggr_init_expr_nargs (exp); iter->i = 0; } /* Return the next argument from abstract argument list iterator object ITER, and advance its state. Return NULL_TREE if there are no more arguments. */ static inline tree next_aggr_init_expr_arg (aggr_init_expr_arg_iterator *iter) { tree result; if (iter->i >= iter->n) return NULL_TREE; result = AGGR_INIT_EXPR_ARG (iter->t, iter->i); iter->i++; return result; } /* Initialize the abstract argument list iterator object ITER, then advance past and return the first argument. Useful in for expressions, e.g. for (arg = first_aggr_init_expr_arg (exp, &iter); arg; arg = next_aggr_init_expr_arg (&iter)) */ static inline tree first_aggr_init_expr_arg (tree exp, aggr_init_expr_arg_iterator *iter) { init_aggr_init_expr_arg_iterator (exp, iter); return next_aggr_init_expr_arg (iter); } /* Test whether there are more arguments in abstract argument list iterator ITER, without changing its state. */ static inline bool more_aggr_init_expr_args_p (const aggr_init_expr_arg_iterator *iter) { return (iter->i < iter->n); } /* Iterate through each argument ARG of AGGR_INIT_EXPR CALL, using variable ITER (of type aggr_init_expr_arg_iterator) to hold the iteration state. */ #define FOR_EACH_AGGR_INIT_EXPR_ARG(arg, iter, call) \ for ((arg) = first_aggr_init_expr_arg ((call), &(iter)); (arg); \ (arg) = next_aggr_init_expr_arg (&(iter))) /* VEC_INIT_EXPR accessors. */ #define VEC_INIT_EXPR_SLOT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 0) #define VEC_INIT_EXPR_INIT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 1) /* Indicates that a VEC_INIT_EXPR is a potential constant expression. Only set when the current function is constexpr. */ #define VEC_INIT_EXPR_IS_CONSTEXPR(NODE) \ TREE_LANG_FLAG_0 (VEC_INIT_EXPR_CHECK (NODE)) /* Indicates that a VEC_INIT_EXPR is expressing value-initialization. 
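   e.g. (illustrative only) the element initialization performed for
   new T[n]() as opposed to new T[n], where each element is
   value-initialized rather than default-initialized.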
*/ #define VEC_INIT_EXPR_VALUE_INIT(NODE) \ TREE_LANG_FLAG_1 (VEC_INIT_EXPR_CHECK (NODE)) /* The condition under which this MUST_NOT_THROW_EXPR actually blocks exceptions. NULL_TREE means 'true'. */ #define MUST_NOT_THROW_COND(NODE) \ TREE_OPERAND (MUST_NOT_THROW_EXPR_CHECK (NODE), 1) /* The TYPE_MAIN_DECL for a class template type is a TYPE_DECL, not a TEMPLATE_DECL. This macro determines whether or not a given class type is really a template type, as opposed to an instantiation or specialization of one. */ #define CLASSTYPE_IS_TEMPLATE(NODE) \ (CLASSTYPE_TEMPLATE_INFO (NODE) \ && !CLASSTYPE_USE_TEMPLATE (NODE) \ && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE))) /* The name used by the user to name the typename type. Typically, this is an IDENTIFIER_NODE, and the same as the DECL_NAME on the corresponding TYPE_DECL. However, this may also be a TEMPLATE_ID_EXPR if we had something like `typename X::Y<T>'. */ #define TYPENAME_TYPE_FULLNAME(NODE) \ (TYPE_VALUES_RAW (TYPENAME_TYPE_CHECK (NODE))) /* True if a TYPENAME_TYPE was declared as an "enum". */ #define TYPENAME_IS_ENUM_P(NODE) \ (TREE_LANG_FLAG_0 (TYPENAME_TYPE_CHECK (NODE))) /* True if a TYPENAME_TYPE was declared as a "class", "struct", or "union". */ #define TYPENAME_IS_CLASS_P(NODE) \ (TREE_LANG_FLAG_1 (TYPENAME_TYPE_CHECK (NODE))) /* True if a TYPENAME_TYPE is in the process of being resolved. */ #define TYPENAME_IS_RESOLVING_P(NODE) \ (TREE_LANG_FLAG_2 (TYPENAME_TYPE_CHECK (NODE))) /* [class.virtual] A class that declares or inherits a virtual function is called a polymorphic class. */ #define TYPE_POLYMORPHIC_P(NODE) (TREE_LANG_FLAG_2 (NODE)) /* Nonzero if this class has a virtual function table pointer. */ #define TYPE_CONTAINS_VPTR_P(NODE) \ (TYPE_POLYMORPHIC_P (NODE) || CLASSTYPE_VBASECLASSES (NODE)) /* This flag is true of a local VAR_DECL if it was declared in a for statement, but we are no longer in the scope of the for. */ #define DECL_DEAD_FOR_LOCAL(NODE) DECL_LANG_FLAG_7 (VAR_DECL_CHECK (NODE)) /* This flag is set on a VAR_DECL that is a DECL_DEAD_FOR_LOCAL if we already emitted a warning about using it. */ #define DECL_ERROR_REPORTED(NODE) DECL_LANG_FLAG_0 (VAR_DECL_CHECK (NODE)) /* Nonzero if NODE is a FUNCTION_DECL (for a function with global scope) declared in a local scope. */ #define DECL_LOCAL_FUNCTION_P(NODE) \ DECL_LANG_FLAG_0 (FUNCTION_DECL_CHECK (NODE)) /* Nonzero if NODE is a DECL which we know about but which has not been explicitly declared, such as a built-in function or a friend declared inside a class. In the latter case DECL_HIDDEN_FRIEND_P will be set. */ #define DECL_ANTICIPATED(NODE) \ (DECL_LANG_SPECIFIC (DECL_COMMON_CHECK (NODE))->u.base.anticipated_p) /* Nonzero if NODE is a FUNCTION_DECL which was declared as a friend within a class but has not been declared in the surrounding scope. The function is invisible except via argument dependent lookup. */ #define DECL_HIDDEN_FRIEND_P(NODE) \ (LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->hidden_friend_p) /* Nonzero if NODE is a FUNCTION_DECL generated by implicitly_declare_fn that we shouldn't actually declare implicitly; it is only used for comparing to an =default declaration. */ #define FNDECL_SUPPRESS_IMPLICIT_DECL(NODE) \ (LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->suppress_implicit_decl) /* Nonzero if DECL has been declared threadprivate by #pragma omp threadprivate. */ #define CP_DECL_THREADPRIVATE_P(DECL) \ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (DECL))->u.base.threadprivate_or_deleted_p) /* Nonzero if DECL was declared with '= delete'. 
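   For example (illustrative only):

     struct S { S (const S &) = delete; };

   the copy constructor of S has this flag set, and any use of it is
   ill-formed.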
*/ #define DECL_DELETED_FN(DECL) \ (DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (DECL))->u.base.threadprivate_or_deleted_p) /* Nonzero if DECL was declared with '= default' (maybe implicitly). */ #define DECL_DEFAULTED_FN(DECL) \ (LANG_DECL_FN_CHECK (DECL)->defaulted_p) /* Nonzero if DECL is explicitly defaulted in the class body. */ #define DECL_DEFAULTED_IN_CLASS_P(DECL) \ (DECL_DEFAULTED_FN (DECL) && DECL_INITIALIZED_IN_CLASS_P (DECL)) /* Nonzero if DECL was defaulted outside the class body. */ #define DECL_DEFAULTED_OUTSIDE_CLASS_P(DECL) \ (DECL_DEFAULTED_FN (DECL) \ && !(DECL_ARTIFICIAL (DECL) || DECL_INITIALIZED_IN_CLASS_P (DECL))) /* Record whether a typedef for type `int' was actually `signed int'. */ #define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP) /* Returns nonzero if DECL has external linkage, as specified by the language standard. (This predicate may hold even when the corresponding entity is not actually given external linkage in the object file; see decl_linkage for details.) */ #define DECL_EXTERNAL_LINKAGE_P(DECL) \ (decl_linkage (DECL) == lk_external) /* Keep these codes in ascending code order. */ #define INTEGRAL_CODE_P(CODE) \ ((CODE) == ENUMERAL_TYPE \ || (CODE) == BOOLEAN_TYPE \ || (CODE) == INTEGER_TYPE) /* [basic.fundamental] Types bool, char, wchar_t, and the signed and unsigned integer types are collectively called integral types. Note that INTEGRAL_TYPE_P, as defined in tree.h, allows enumeration types as well, which is incorrect in C++. Keep these checks in ascending code order. */ #define CP_INTEGRAL_TYPE_P(TYPE) \ (TREE_CODE (TYPE) == BOOLEAN_TYPE \ || TREE_CODE (TYPE) == INTEGER_TYPE) /* Returns true if TYPE is an integral or enumeration name. Keep these checks in ascending code order. */ #define INTEGRAL_OR_ENUMERATION_TYPE_P(TYPE) \ (TREE_CODE (TYPE) == ENUMERAL_TYPE || CP_INTEGRAL_TYPE_P (TYPE)) /* Returns true if TYPE is an integral or unscoped enumeration type. */ #define INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P(TYPE) \ (UNSCOPED_ENUM_P (TYPE) || CP_INTEGRAL_TYPE_P (TYPE)) /* True if the class type TYPE is a literal type. */ #define CLASSTYPE_LITERAL_P(TYPE) \ (LANG_TYPE_CLASS_CHECK (TYPE)->is_literal) /* [basic.fundamental] Integral and floating types are collectively called arithmetic types. As a GNU extension, we also accept complex types. Keep these checks in ascending code order. */ #define ARITHMETIC_TYPE_P(TYPE) \ (CP_INTEGRAL_TYPE_P (TYPE) \ || TREE_CODE (TYPE) == REAL_TYPE \ || TREE_CODE (TYPE) == COMPLEX_TYPE) /* True iff TYPE is cv decltype(nullptr). */ #define NULLPTR_TYPE_P(TYPE) (TREE_CODE (TYPE) == NULLPTR_TYPE) /* [basic.types] Arithmetic types, enumeration types, pointer types, pointer-to-member types, and std::nullptr_t are collectively called scalar types. Keep these checks in ascending code order. */ #define SCALAR_TYPE_P(TYPE) \ (TYPE_PTRMEM_P (TYPE) \ || TREE_CODE (TYPE) == ENUMERAL_TYPE \ || ARITHMETIC_TYPE_P (TYPE) \ || TYPE_PTR_P (TYPE) \ || TYPE_PTRMEMFUNC_P (TYPE) \ || NULLPTR_TYPE_P (TYPE)) /* Determines whether this type is a C++0x scoped enumeration type. Scoped enumerations types are introduced via "enum class" or "enum struct", e.g., enum class Color { Red, Green, Blue }; Scoped enumeration types are different from normal (unscoped) enumeration types in several ways: - The enumerators of a scoped enumeration type are only available within the scope of the enumeration type and not in the enclosing scope. For example, the Red color can be referred to with "Color::Red" but not "Red". 
- Scoped enumerators and enumerations do not implicitly convert to integers or 'bool'. - The underlying type of the enum is well-defined. */ #define SCOPED_ENUM_P(TYPE) \ (TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_SCOPED (TYPE)) /* Determine whether this is an unscoped enumeration type. */ #define UNSCOPED_ENUM_P(TYPE) \ (TREE_CODE (TYPE) == ENUMERAL_TYPE && !ENUM_IS_SCOPED (TYPE)) /* Set the flag indicating whether an ENUMERAL_TYPE is a C++0x scoped enumeration type (1) or a normal (unscoped) enumeration type (0). */ #define SET_SCOPED_ENUM_P(TYPE, VAL) \ (ENUM_IS_SCOPED (TYPE) = (VAL)) #define SET_OPAQUE_ENUM_P(TYPE, VAL) \ (ENUM_IS_OPAQUE (TYPE) = (VAL)) #define OPAQUE_ENUM_P(TYPE) \ (TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_OPAQUE (TYPE)) /* Determines whether an ENUMERAL_TYPE has an explicit underlying type. */ #define ENUM_FIXED_UNDERLYING_TYPE_P(NODE) (TYPE_LANG_FLAG_5 (NODE)) /* Returns the underlying type of the given enumeration type. The underlying type is determined in different ways, depending on the properties of the enum: - In C++0x, the underlying type can be explicitly specified, e.g., enum E1 : char { ... } // underlying type is char - In a C++0x scoped enumeration, the underlying type is int unless otherwises specified: enum class E2 { ... } // underlying type is int - Otherwise, the underlying type is determined based on the values of the enumerators. In this case, the ENUM_UNDERLYING_TYPE will not be set until after the definition of the enumeration is completed by finish_enum. */ #define ENUM_UNDERLYING_TYPE(TYPE) \ TREE_TYPE (ENUMERAL_TYPE_CHECK (TYPE)) /* [dcl.init.aggr] An aggregate is an array or a class with no user-provided constructors, no brace-or-equal-initializers for non-static data members, no private or protected non-static data members, no base classes, and no virtual functions. As an extension, we also treat vectors as aggregates. Keep these checks in ascending code order. */ #define CP_AGGREGATE_TYPE_P(TYPE) \ (TREE_CODE (TYPE) == VECTOR_TYPE \ ||TREE_CODE (TYPE) == ARRAY_TYPE \ || (CLASS_TYPE_P (TYPE) && !CLASSTYPE_NON_AGGREGATE (TYPE))) /* Nonzero for a class type means that the class type has a user-declared constructor. */ #define TYPE_HAS_USER_CONSTRUCTOR(NODE) (TYPE_LANG_FLAG_1 (NODE)) /* When appearing in an INDIRECT_REF, it means that the tree structure underneath is actually a call to a constructor. This is needed when the constructor must initialize local storage (which can be automatically destroyed), rather than allowing it to allocate space from the heap. When appearing in a SAVE_EXPR, it means that underneath is a call to a constructor. When appearing in a CONSTRUCTOR, the expression is a compound literal. When appearing in a FIELD_DECL, it means that this field has been duly initialized in its constructor. */ #define TREE_HAS_CONSTRUCTOR(NODE) (TREE_LANG_FLAG_4 (NODE)) /* True if NODE is a brace-enclosed initializer. */ #define BRACE_ENCLOSED_INITIALIZER_P(NODE) \ (TREE_CODE (NODE) == CONSTRUCTOR && TREE_TYPE (NODE) == init_list_type_node) /* True if NODE is a compound-literal, i.e., a brace-enclosed initializer cast to a particular type. */ #define COMPOUND_LITERAL_P(NODE) \ (TREE_CODE (NODE) == CONSTRUCTOR && TREE_HAS_CONSTRUCTOR (NODE)) #define EMPTY_CONSTRUCTOR_P(NODE) (TREE_CODE (NODE) == CONSTRUCTOR \ && VEC_empty (constructor_elt, \ CONSTRUCTOR_ELTS (NODE)) \ && !TREE_HAS_CONSTRUCTOR (NODE)) /* True if NODE is a init-list used as a direct-initializer, i.e. B b{1,2}, not B b({1,2}) or B b = {1,2}. 
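   For example, assuming a class B with a matching two-argument constructor,
   the braced list in `B b{1,2}' is a direct-initializer and this flag is set
   on that list, whereas the list in the copy-initialization `B b = {1,2}'
   does not have it set.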
*/ #define CONSTRUCTOR_IS_DIRECT_INIT(NODE) (TREE_LANG_FLAG_0 (CONSTRUCTOR_CHECK (NODE))) /* True if NODE represents a conversion for direct-initialization in a template. Set by perform_implicit_conversion_flags. */ #define IMPLICIT_CONV_EXPR_DIRECT_INIT(NODE) \ (TREE_LANG_FLAG_0 (IMPLICIT_CONV_EXPR_CHECK (NODE))) /* Nonzero means that an object of this type cannot be initialized using an initializer list. */ #define CLASSTYPE_NON_AGGREGATE(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->non_aggregate) #define TYPE_NON_AGGREGATE_CLASS(NODE) \ (CLASS_TYPE_P (NODE) && CLASSTYPE_NON_AGGREGATE (NODE)) /* Nonzero if there is a non-trivial X::op=(cv X&) for this class. */ #define TYPE_HAS_COMPLEX_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_assign) /* Nonzero if there is a non-trivial X::X(cv X&) for this class. */ #define TYPE_HAS_COMPLEX_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_ctor) /* Nonzero if there is a non-trivial X::op=(X&&) for this class. */ #define TYPE_HAS_COMPLEX_MOVE_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_assign) /* Nonzero if there is a non-trivial X::X(X&&) for this class. */ #define TYPE_HAS_COMPLEX_MOVE_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_ctor) /* Nonzero if there is a non-trivial default constructor for this class. */ #define TYPE_HAS_COMPLEX_DFLT(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_dflt) /* Nonzero if TYPE has a trivial destructor. From [class.dtor]: A destructor is trivial if it is an implicitly declared destructor and if: - all of the direct base classes of its class have trivial destructors, - for all of the non-static data members of its class that are of class type (or array thereof), each such class has a trivial destructor. */ #define TYPE_HAS_TRIVIAL_DESTRUCTOR(NODE) \ (!TYPE_HAS_NONTRIVIAL_DESTRUCTOR (NODE)) /* Nonzero for _TYPE node means that this type does not have a trivial destructor. Therefore, destroying an object of this type will involve a call to a destructor. This can apply to objects of ARRAY_TYPE if the type of the elements needs a destructor. */ #define TYPE_HAS_NONTRIVIAL_DESTRUCTOR(NODE) \ (TYPE_LANG_FLAG_4 (NODE)) /* Nonzero for class type means that the default constructor is trivial. */ #define TYPE_HAS_TRIVIAL_DFLT(NODE) \ (TYPE_HAS_DEFAULT_CONSTRUCTOR (NODE) && ! TYPE_HAS_COMPLEX_DFLT (NODE)) /* Nonzero for class type means that copy initialization of this type can use a bitwise copy. */ #define TYPE_HAS_TRIVIAL_COPY_CTOR(NODE) \ (TYPE_HAS_COPY_CTOR (NODE) && ! TYPE_HAS_COMPLEX_COPY_CTOR (NODE)) /* Nonzero for class type means that assignment of this type can use a bitwise copy. */ #define TYPE_HAS_TRIVIAL_COPY_ASSIGN(NODE) \ (TYPE_HAS_COPY_ASSIGN (NODE) && ! TYPE_HAS_COMPLEX_COPY_ASSIGN (NODE)) /* Returns true if NODE is a pointer-to-data-member. */ #define TYPE_PTRMEM_P(NODE) \ (TREE_CODE (NODE) == OFFSET_TYPE) /* Returns true if NODE is a pointer. */ #define TYPE_PTR_P(NODE) \ (TREE_CODE (NODE) == POINTER_TYPE) /* Returns true if NODE is an object type: [basic.types] An object type is a (possibly cv-qualified) type that is not a function type, not a reference type, and not a void type. Keep these checks in ascending order, for speed. */ #define TYPE_OBJ_P(NODE) \ (TREE_CODE (NODE) != REFERENCE_TYPE \ && TREE_CODE (NODE) != VOID_TYPE \ && TREE_CODE (NODE) != FUNCTION_TYPE \ && TREE_CODE (NODE) != METHOD_TYPE) /* Returns true if NODE is a pointer to an object. Keep these checks in ascending tree code order.
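   For example, `int *' and `S *' (for a class S) are pointers to objects,
   while `void *' and `void (*)(int)' are not, because `void' and function
   types are not object types.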
*/ #define TYPE_PTROB_P(NODE) \ (TYPE_PTR_P (NODE) && TYPE_OBJ_P (TREE_TYPE (NODE))) /* Returns true if NODE is a reference to an object. Keep these checks in ascending tree code order. */ #define TYPE_REF_OBJ_P(NODE) \ (TREE_CODE (NODE) == REFERENCE_TYPE && TYPE_OBJ_P (TREE_TYPE (NODE))) /* Returns true if NODE is a pointer to an object, or a pointer to void. Keep these checks in ascending tree code order. */ #define TYPE_PTROBV_P(NODE) \ (TYPE_PTR_P (NODE) \ && !(TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE \ || TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE)) /* Returns true if NODE is a pointer to function. */ #define TYPE_PTRFN_P(NODE) \ (TREE_CODE (NODE) == POINTER_TYPE \ && TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE) /* Returns true if NODE is a reference to function. */ #define TYPE_REFFN_P(NODE) \ (TREE_CODE (NODE) == REFERENCE_TYPE \ && TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE) /* Nonzero for _TYPE node means that this type is a pointer to member function type. */ #define TYPE_PTRMEMFUNC_P(NODE) \ (TREE_CODE (NODE) == RECORD_TYPE \ && TYPE_LANG_SPECIFIC (NODE) \ && TYPE_PTRMEMFUNC_FLAG (NODE)) #define TYPE_PTRMEMFUNC_FLAG(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->ptrmemfunc_flag) /* Returns true if NODE is a pointer-to-member. */ #define TYPE_PTR_TO_MEMBER_P(NODE) \ (TYPE_PTRMEM_P (NODE) || TYPE_PTRMEMFUNC_P (NODE)) /* Indicates when overload resolution may resolve to a pointer to member function. [expr.unary.op]/3 */ #define PTRMEM_OK_P(NODE) \ TREE_LANG_FLAG_0 (TREE_CHECK3 ((NODE), ADDR_EXPR, OFFSET_REF, SCOPE_REF)) /* Get the POINTER_TYPE to the METHOD_TYPE associated with this pointer to member function. TYPE_PTRMEMFUNC_P _must_ be true, before using this macro. */ #define TYPE_PTRMEMFUNC_FN_TYPE(NODE) \ (TREE_TYPE (TYPE_FIELDS (NODE))) /* Returns `A' for a type like `int (A::*)(double)' */ #define TYPE_PTRMEMFUNC_OBJECT_TYPE(NODE) \ TYPE_METHOD_BASETYPE (TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE))) /* These are use to manipulate the canonical RECORD_TYPE from the hashed POINTER_TYPE, and can only be used on the POINTER_TYPE. */ #define TYPE_GET_PTRMEMFUNC_TYPE(NODE) \ (TYPE_LANG_SPECIFIC (NODE) ? LANG_TYPE_PTRMEM_CHECK (NODE)->record : NULL) #define TYPE_SET_PTRMEMFUNC_TYPE(NODE, VALUE) \ do { \ if (TYPE_LANG_SPECIFIC (NODE) == NULL) \ { \ TYPE_LANG_SPECIFIC (NODE) = ggc_alloc_cleared_lang_type \ (sizeof (struct lang_type_ptrmem)); \ TYPE_LANG_SPECIFIC (NODE)->u.ptrmem.h.is_lang_type_class = 0; \ } \ TYPE_LANG_SPECIFIC (NODE)->u.ptrmem.record = (VALUE); \ } while (0) /* For a pointer-to-member type of the form `T X::*', this is `X'. For a type like `void (X::*)() const', this type is `X', not `const X'. To get at the `const X' you have to look at the TYPE_PTRMEM_POINTED_TO_TYPE; there, the first parameter will have type `const X*'. */ #define TYPE_PTRMEM_CLASS_TYPE(NODE) \ (TYPE_PTRMEM_P (NODE) \ ? TYPE_OFFSET_BASETYPE (NODE) \ : TYPE_PTRMEMFUNC_OBJECT_TYPE (NODE)) /* For a pointer-to-member type of the form `T X::*', this is `T'. */ #define TYPE_PTRMEM_POINTED_TO_TYPE(NODE) \ (TYPE_PTRMEM_P (NODE) \ ? TREE_TYPE (NODE) \ : TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE))) /* For a pointer-to-member constant `X::Y' this is the RECORD_TYPE for `X'. */ #define PTRMEM_CST_CLASS(NODE) \ TYPE_PTRMEM_CLASS_TYPE (TREE_TYPE (PTRMEM_CST_CHECK (NODE))) /* For a pointer-to-member constant `X::Y' this is the _DECL for `Y'. */ #define PTRMEM_CST_MEMBER(NODE) (((ptrmem_cst_t)PTRMEM_CST_CHECK (NODE))->member) /* The expression in question for a TYPEOF_TYPE. 
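   For example, for the GNU extension `__typeof__ (x + y) z;' this is the
   expression `x + y' (x, y and z being illustrative names).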
*/ #define TYPEOF_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (TYPEOF_TYPE_CHECK (NODE))) /* The type in question for an UNDERLYING_TYPE. */ #define UNDERLYING_TYPE_TYPE(NODE) \ (TYPE_VALUES_RAW (UNDERLYING_TYPE_CHECK (NODE))) /* The type in question for BASES. */ #define BASES_TYPE(NODE) \ (TYPE_VALUES_RAW (BASES_CHECK (NODE))) #define BASES_DIRECT(NODE) \ TREE_LANG_FLAG_0 (BASES_CHECK (NODE)) /* The expression in question for a DECLTYPE_TYPE. */ #define DECLTYPE_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (DECLTYPE_TYPE_CHECK (NODE))) /* Whether the DECLTYPE_TYPE_EXPR of NODE was originally parsed as an id-expression or a member-access expression. When false, it was parsed as a full expression. */ #define DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P(NODE) \ (DECLTYPE_TYPE_CHECK (NODE))->type_common.string_flag /* These flags indicate that we want different semantics from normal decltype: lambda capture just drops references, lambda proxies look through implicit dereference. */ #define DECLTYPE_FOR_LAMBDA_CAPTURE(NODE) \ TREE_LANG_FLAG_0 (DECLTYPE_TYPE_CHECK (NODE)) #define DECLTYPE_FOR_LAMBDA_PROXY(NODE) \ TREE_LANG_FLAG_2 (DECLTYPE_TYPE_CHECK (NODE)) /* Nonzero for VAR_DECL and FUNCTION_DECL node means that `extern' was specified in its declaration. This can also be set for an erroneously declared PARM_DECL. */ #define DECL_THIS_EXTERN(NODE) \ DECL_LANG_FLAG_2 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE)) /* Nonzero for VAR_DECL and FUNCTION_DECL node means that `static' was specified in its declaration. This can also be set for an erroneously declared PARM_DECL. */ #define DECL_THIS_STATIC(NODE) \ DECL_LANG_FLAG_6 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE)) /* Nonzero for FIELD_DECL node means that this field is a base class of the parent object, as opposed to a member field. */ #define DECL_FIELD_IS_BASE(NODE) \ DECL_LANG_FLAG_6 (FIELD_DECL_CHECK (NODE)) /* Nonzero for FIELD_DECL node means that this field is a simple (no explicit initializer) lambda capture field, making it invisible to name lookup in unevaluated contexts. */ #define DECL_NORMAL_CAPTURE_P(NODE) \ DECL_LANG_FLAG_7 (FIELD_DECL_CHECK (NODE)) /* Nonzero if TYPE is an anonymous union or struct type. We have to use a flag for this because "A union for which objects or pointers are declared is not an anonymous union" [class.union]. */ #define ANON_AGGR_TYPE_P(NODE) \ (CLASS_TYPE_P (NODE) && LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr) #define SET_ANON_AGGR_TYPE_P(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr = 1) /* Nonzero if TYPE is an anonymous union type. */ #define ANON_UNION_TYPE_P(NODE) \ (TREE_CODE (NODE) == UNION_TYPE && ANON_AGGR_TYPE_P (NODE)) /* Define fields and accessors for nodes representing declared names. */ #define TYPE_WAS_ANONYMOUS(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->was_anonymous) /* C++: all of these are overloaded! These apply only to TYPE_DECLs. */ /* The format of each node in the DECL_FRIENDLIST is as follows: The TREE_PURPOSE will be the name of a function, i.e., an IDENTIFIER_NODE. The TREE_VALUE will be itself a TREE_LIST, whose TREE_VALUEs are friends with the given name. */ #define DECL_FRIENDLIST(NODE) (DECL_INITIAL (NODE)) #define FRIEND_NAME(LIST) (TREE_PURPOSE (LIST)) #define FRIEND_DECLS(LIST) (TREE_VALUE (LIST)) /* The DECL_ACCESS, if non-NULL, is a TREE_LIST. The TREE_PURPOSE of each node is a type; the TREE_VALUE is the access granted for this DECL in that type. The DECL_ACCESS is set by access declarations. 
For example, if a member that would normally be public in a derived class is made protected, then the derived class and the protected_access_node will appear in the DECL_ACCESS for the node. */ #define DECL_ACCESS(NODE) (LANG_DECL_U2_CHECK (NODE, 0)->access) /* Nonzero if the FUNCTION_DECL is a global constructor. */ #define DECL_GLOBAL_CTOR_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->global_ctor_p) /* Nonzero if the FUNCTION_DECL is a global destructor. */ #define DECL_GLOBAL_DTOR_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->global_dtor_p) /* Accessor macros for C++ template decl nodes. */ /* The DECL_TEMPLATE_PARMS are a list. The TREE_PURPOSE of each node is a INT_CST whose TREE_INT_CST_LOW indicates the level of the template parameters, with 1 being the outermost set of template parameters. The TREE_VALUE is a vector, whose elements are the template parameters at each level. Each element in the vector is a TREE_LIST, whose TREE_VALUE is a PARM_DECL (if the parameter is a non-type parameter), or a TYPE_DECL (if the parameter is a type parameter). The TREE_PURPOSE is the default value, if any. The TEMPLATE_PARM_INDEX for the parameter is available as the DECL_INITIAL (for a PARM_DECL) or as the TREE_TYPE (for a TYPE_DECL). */ #define DECL_TEMPLATE_PARMS(NODE) DECL_NON_COMMON_CHECK (NODE)->decl_non_common.arguments #define DECL_INNERMOST_TEMPLATE_PARMS(NODE) \ INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (NODE)) #define DECL_NTPARMS(NODE) \ TREE_VEC_LENGTH (DECL_INNERMOST_TEMPLATE_PARMS (NODE)) /* For function, method, class-data templates. */ #define DECL_TEMPLATE_RESULT(NODE) DECL_RESULT_FLD (NODE) /* For a function template at namespace scope, DECL_TEMPLATE_INSTANTIATIONS lists all instantiations and specializations of the function so that tsubst_friend_function can reassign them to another template if we find that the namespace-scope template is really a partial instantiation of a friend template. For a class template the DECL_TEMPLATE_INSTANTIATIONS lists holds all instantiations and specializations of the class type, including partial instantiations and partial specializations, so that if we explicitly specialize a partial instantiation we can walk the list in maybe_process_partial_specialization and reassign them or complain as appropriate. In both cases, the TREE_PURPOSE of each node contains the arguments used; the TREE_VALUE contains the generated variable. The template arguments are always complete. For example, given: template <class T> struct S1 { template <class U> struct S2 {}; template <class U> struct S2<U*> {}; }; the record for the partial specialization will contain, as its argument list, { {T}, {U*} }, and will be on the DECL_TEMPLATE_INSTANTIATIONS list for `template <class T> template <class U> struct S1<T>::S2'. This list is not used for other templates. */ #define DECL_TEMPLATE_INSTANTIATIONS(NODE) DECL_VINDEX (NODE) /* For a class template, this list contains the partial specializations of this template. (Full specializations are not recorded on this list.) The TREE_PURPOSE holds the arguments used in the partial specialization (e.g., for `template <class T> struct S<T*, int>' this will be `T*'.) The arguments will also include any outer template arguments. The TREE_VALUE holds the innermost template parameters for the specialization (e.g., `T' in the example above.) The TREE_TYPE is the _TYPE node for the partial specialization. This list is not used for other templates. 
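   To illustrate, given

     template <class T> struct S;
     template <class T> struct S<T*> { };

   the list for S holds one node whose TREE_PURPOSE is `T*', whose
   TREE_VALUE is `T', and whose TREE_TYPE is the type `S<T*>'.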
*/ #define DECL_TEMPLATE_SPECIALIZATIONS(NODE) DECL_SIZE (NODE) /* Nonzero for a DECL which is actually a template parameter. Keep these checks in ascending tree code order. */ #define DECL_TEMPLATE_PARM_P(NODE) \ (DECL_LANG_FLAG_0 (NODE) \ && (TREE_CODE (NODE) == CONST_DECL \ || TREE_CODE (NODE) == PARM_DECL \ || TREE_CODE (NODE) == TYPE_DECL \ || TREE_CODE (NODE) == TEMPLATE_DECL)) /* Mark NODE as a template parameter. */ #define SET_DECL_TEMPLATE_PARM_P(NODE) \ (DECL_LANG_FLAG_0 (NODE) = 1) /* Nonzero if NODE is a template template parameter. */ #define DECL_TEMPLATE_TEMPLATE_PARM_P(NODE) \ (TREE_CODE (NODE) == TEMPLATE_DECL && DECL_TEMPLATE_PARM_P (NODE)) /* Nonzero if NODE is a TEMPLATE_DECL representing an UNBOUND_CLASS_TEMPLATE tree node. */ #define DECL_UNBOUND_CLASS_TEMPLATE_P(NODE) \ (TREE_CODE (NODE) == TEMPLATE_DECL && !DECL_TEMPLATE_RESULT (NODE)) #define DECL_FUNCTION_TEMPLATE_P(NODE) \ (TREE_CODE (NODE) == TEMPLATE_DECL \ && !DECL_UNBOUND_CLASS_TEMPLATE_P (NODE) \ && TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == FUNCTION_DECL) /* Nonzero for a DECL that represents a class template or alias template. */ #define DECL_TYPE_TEMPLATE_P(NODE) \ (TREE_CODE (NODE) == TEMPLATE_DECL \ && DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \ && TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == TYPE_DECL) /* Nonzero for a DECL that represents a class template. */ #define DECL_CLASS_TEMPLATE_P(NODE) \ (DECL_TYPE_TEMPLATE_P (NODE) \ && DECL_IMPLICIT_TYPEDEF_P (DECL_TEMPLATE_RESULT (NODE))) /* Nonzero for a TEMPLATE_DECL that represents an alias template. */ #define DECL_ALIAS_TEMPLATE_P(NODE) \ (DECL_TYPE_TEMPLATE_P (NODE) \ && !DECL_ARTIFICIAL (DECL_TEMPLATE_RESULT (NODE))) /* Nonzero for a NODE which declares a type. */ #define DECL_DECLARES_TYPE_P(NODE) \ (TREE_CODE (NODE) == TYPE_DECL || DECL_CLASS_TEMPLATE_P (NODE)) /* Nonzero if NODE declares a function. */ #define DECL_DECLARES_FUNCTION_P(NODE) \ (TREE_CODE (NODE) == FUNCTION_DECL || DECL_FUNCTION_TEMPLATE_P (NODE)) /* Nonzero if NODE is the typedef implicitly generated for a type when the type is declared. In C++, `struct S {};' is roughly equivalent to `struct S {}; typedef struct S S;' in C. DECL_IMPLICIT_TYPEDEF_P will hold for the typedef indicated in this example. In C++, there is a second implicit typedef for each class, in the scope of `S' itself, so that you can say `S::S'. DECL_SELF_REFERENCE_P will hold for that second typedef. */ #define DECL_IMPLICIT_TYPEDEF_P(NODE) \ (TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_2 (NODE)) #define SET_DECL_IMPLICIT_TYPEDEF_P(NODE) \ (DECL_LANG_FLAG_2 (NODE) = 1) #define DECL_SELF_REFERENCE_P(NODE) \ (TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_4 (NODE)) #define SET_DECL_SELF_REFERENCE_P(NODE) \ (DECL_LANG_FLAG_4 (NODE) = 1) /* A `primary' template is one that has its own template header. A member function of a class template is a template, but not primary. A member template is primary. Friend templates are primary, too. */ /* Returns the primary template corresponding to these parameters. */ #define DECL_PRIMARY_TEMPLATE(NODE) \ (TREE_TYPE (DECL_INNERMOST_TEMPLATE_PARMS (NODE))) /* Returns nonzero if NODE is a primary template. */ #define PRIMARY_TEMPLATE_P(NODE) (DECL_PRIMARY_TEMPLATE (NODE) == (NODE)) /* Nonzero iff NODE is a specialization of a template. 
The value indicates the type of specializations: 1=implicit instantiation 2=partial or explicit specialization, e.g.: template <> int min<int> (int, int), 3=explicit instantiation, e.g.: template int min<int> (int, int); Note that NODE will be marked as a specialization even if the template it is instantiating is not a primary template. For example, given: template <typename T> struct O { void f(); struct I {}; }; both O<int>::f and O<int>::I will be marked as instantiations. If DECL_USE_TEMPLATE is nonzero, then DECL_TEMPLATE_INFO will also be non-NULL. */ #define DECL_USE_TEMPLATE(NODE) (DECL_LANG_SPECIFIC (NODE)->u.base.use_template) /* Like DECL_USE_TEMPLATE, but for class types. */ #define CLASSTYPE_USE_TEMPLATE(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->use_template) /* True if NODE is a specialization of a primary template. */ #define CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P(NODE) \ (CLASS_TYPE_P (NODE) \ && CLASSTYPE_USE_TEMPLATE (NODE) \ && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE))) #define DECL_TEMPLATE_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) & 1) #define CLASSTYPE_TEMPLATE_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) & 1) #define DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) == 2) #define SET_DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) = 2) /* Returns true for an explicit or partial specialization of a class template. */ #define CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) == 2) #define SET_CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) = 2) #define DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 1) #define SET_DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 1) #define CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) == 1) #define SET_CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) = 1) #define DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 3) #define SET_DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 3) #define CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) == 3) #define SET_CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) = 3) /* Nonzero if DECL is a friend function which is an instantiation from the point of view of the compiler, but not from the point of view of the language. For example given: template <class T> struct S { friend void f(T) {}; }; the declaration of `void f(int)' generated when S<int> is instantiated will not be a DECL_TEMPLATE_INSTANTIATION, but will be a DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION. */ #define DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION(DECL) \ (DECL_TEMPLATE_INFO (DECL) && !DECL_USE_TEMPLATE (DECL)) /* Nonzero if DECL is a function generated from a function 'temploid', i.e. template, member of class template, or dependent friend. */ #define DECL_TEMPLOID_INSTANTIATION(DECL) \ (DECL_TEMPLATE_INSTANTIATION (DECL) \ || DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (DECL)) /* Nonzero if DECL is either defined implicitly by the compiler or generated from a temploid. */ #define DECL_GENERATED_P(DECL) \ (DECL_TEMPLOID_INSTANTIATION (DECL) || DECL_DEFAULTED_FN (DECL)) /* Nonzero iff we are currently processing a declaration for an entity with its own template parameter list, and which is not a full specialization. 
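   For example, this is nonzero while processing the namespace-scope
   declaration `template <class T> struct A;', but zero while processing an
   ordinary non-template declaration.  (A is an illustrative name.)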
*/ #define PROCESSING_REAL_TEMPLATE_DECL_P() \ (processing_template_decl > template_class_depth (current_scope ())) /* Nonzero if this VAR_DECL or FUNCTION_DECL has already been instantiated, i.e. its definition has been generated from the pattern given in the template. */ #define DECL_TEMPLATE_INSTANTIATED(NODE) \ DECL_LANG_FLAG_1 (VAR_OR_FUNCTION_DECL_CHECK (NODE)) /* We know what we're doing with this decl now. */ #define DECL_INTERFACE_KNOWN(NODE) DECL_LANG_FLAG_5 (NODE) /* DECL_EXTERNAL must be set on a decl until the decl is actually emitted, so that assemble_external will work properly. So we have this flag to tell us whether the decl is really not external. This flag does not indicate whether or not the decl is defined in the current translation unit; it indicates whether or not we should emit the decl at the end of compilation if it is defined and needed. */ #define DECL_NOT_REALLY_EXTERN(NODE) \ (DECL_LANG_SPECIFIC (NODE)->u.base.not_really_extern) #define DECL_REALLY_EXTERN(NODE) \ (DECL_EXTERNAL (NODE) && ! DECL_NOT_REALLY_EXTERN (NODE)) /* A thunk is a stub function. A thunk is an alternate entry point for an ordinary FUNCTION_DECL. The address of the ordinary FUNCTION_DECL is given by the DECL_INITIAL, which is always an ADDR_EXPR whose operand is a FUNCTION_DECL. The job of the thunk is to either adjust the this pointer before transferring control to the FUNCTION_DECL, or call FUNCTION_DECL and then adjust the result value. Note, the result pointer adjusting thunk must perform a call to the thunked function, (or be implemented via passing some invisible parameter to the thunked function, which is modified to perform the adjustment just before returning). A thunk may perform either, or both, of the following operations: o Adjust the this or result pointer by a constant offset. o Adjust the this or result pointer by looking up a vcall or vbase offset in the vtable. A this pointer adjusting thunk converts from a base to a derived class, and hence adds the offsets. A result pointer adjusting thunk converts from a derived class to a base, and hence subtracts the offsets. If both operations are performed, then the constant adjustment is performed first for this pointer adjustment and last for the result pointer adjustment. The constant adjustment is given by THUNK_FIXED_OFFSET. If the vcall or vbase offset is required, THUNK_VIRTUAL_OFFSET is used. For this pointer adjusting thunks, it is the vcall offset into the vtable. For result pointer adjusting thunks it is the binfo of the virtual base to convert to. Use that binfo's vbase offset. It is possible to have equivalent covariant thunks. These are distinct virtual covariant thunks whose vbase offsets happen to have the same value. THUNK_ALIAS is used to pick one as the canonical thunk, which will get all the this pointer adjusting thunks attached to it. */ /* An integer indicating how many bytes should be subtracted from the this or result pointer when this function is called. */ #define THUNK_FIXED_OFFSET(DECL) \ (DECL_LANG_SPECIFIC (THUNK_FUNCTION_CHECK (DECL))->u.fn.u5.fixed_offset) /* A tree indicating how to perform the virtual adjustment. For a this adjusting thunk it is the number of bytes to be added to the vtable to find the vcall offset. For a result adjusting thunk, it is the binfo of the relevant virtual base. If NULL, then there is no virtual adjust. (The vptr is always located at offset zero from the this or result pointer.) 
(If the covariant type is within the class hierarchy being laid out, the vbase index is not yet known at the point we need to create the thunks, hence the need to use binfos.) */ #define THUNK_VIRTUAL_OFFSET(DECL) \ (LANG_DECL_U2_CHECK (FUNCTION_DECL_CHECK (DECL), 0)->access) /* A thunk which is equivalent to another thunk. */ #define THUNK_ALIAS(DECL) \ (DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (DECL))->u.min.template_info) /* For thunk NODE, this is the FUNCTION_DECL thunked to. It is possible for the target to be a thunk too. */ #define THUNK_TARGET(NODE) \ (LANG_DECL_FN_CHECK (NODE)->befriending_classes) /* True for a SCOPE_REF iff the "template" keyword was used to indicate that the qualified name denotes a template. */ #define QUALIFIED_NAME_IS_TEMPLATE(NODE) \ (TREE_LANG_FLAG_1 (SCOPE_REF_CHECK (NODE))) /* True for an OMP_ATOMIC that has dependent parameters. These are stored as an expr in operand 1, and integer_zero_node in operand 0. */ #define OMP_ATOMIC_DEPENDENT_P(NODE) \ (TREE_CODE (TREE_OPERAND (OMP_ATOMIC_CHECK (NODE), 0)) == INTEGER_CST) /* Used while gimplifying continue statements bound to OMP_FOR nodes. */ #define OMP_FOR_GIMPLIFYING_P(NODE) \ (TREE_LANG_FLAG_0 (OMP_FOR_CHECK (NODE))) /* A language-specific token attached to the OpenMP data clauses to hold code (or code fragments) related to ctors, dtors, and op=. See semantics.c for details. */ #define CP_OMP_CLAUSE_INFO(NODE) \ TREE_TYPE (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_PRIVATE, \ OMP_CLAUSE_COPYPRIVATE)) /* Nonzero if this transaction expression's body contains statements. */ #define TRANSACTION_EXPR_IS_STMT(NODE) \ TREE_LANG_FLAG_0 (TRANSACTION_EXPR_CHECK (NODE)) /* These macros provide convenient access to the various _STMT nodes created when parsing template declarations. */ #define TRY_STMTS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 0) #define TRY_HANDLERS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 1) #define EH_SPEC_STMTS(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 0) #define EH_SPEC_RAISES(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 1) #define USING_STMT_NAMESPACE(NODE) TREE_OPERAND (USING_STMT_CHECK (NODE), 0) /* Nonzero if this try block is a function try block. */ #define FN_TRY_BLOCK_P(NODE) TREE_LANG_FLAG_3 (TRY_BLOCK_CHECK (NODE)) #define HANDLER_PARMS(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 0) #define HANDLER_BODY(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 1) #define HANDLER_TYPE(NODE) TREE_TYPE (HANDLER_CHECK (NODE)) /* CLEANUP_STMT accessors. The statement(s) covered, the cleanup to run and the VAR_DECL for which this cleanup exists. */ #define CLEANUP_BODY(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 0) #define CLEANUP_EXPR(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 1) #define CLEANUP_DECL(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 2) /* IF_STMT accessors. These give access to the condition of the if statement, the then block of the if statement, and the else block of the if statement if it exists. */ #define IF_COND(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 0) #define THEN_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 1) #define ELSE_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 2) #define IF_SCOPE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 3) /* WHILE_STMT accessors. These give access to the condition of the while statement and the body of the while statement, respectively. */ #define WHILE_COND(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 0) #define WHILE_BODY(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 1) /* DO_STMT accessors. 
These give access to the condition of the do statement and the body of the do statement, respectively. */ #define DO_COND(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 0) #define DO_BODY(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 1) /* FOR_STMT accessors. These give access to the init statement, condition, update expression, and body of the for statement, respectively. */ #define FOR_INIT_STMT(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 0) #define FOR_COND(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 1) #define FOR_EXPR(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 2) #define FOR_BODY(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 3) #define FOR_SCOPE(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 4) /* RANGE_FOR_STMT accessors. These give access to the declarator, expression, body, and scope of the statement, respectively. */ #define RANGE_FOR_DECL(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 0) #define RANGE_FOR_EXPR(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 1) #define RANGE_FOR_BODY(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 2) #define RANGE_FOR_SCOPE(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 3) #define SWITCH_STMT_COND(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 0) #define SWITCH_STMT_BODY(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 1) #define SWITCH_STMT_TYPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 2) #define SWITCH_STMT_SCOPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 3) /* STMT_EXPR accessor. */ #define STMT_EXPR_STMT(NODE) TREE_OPERAND (STMT_EXPR_CHECK (NODE), 0) /* EXPR_STMT accessor. This gives the expression associated with an expression statement. */ #define EXPR_STMT_EXPR(NODE) TREE_OPERAND (EXPR_STMT_CHECK (NODE), 0) /* True if this TARGET_EXPR was created by build_cplus_new, and so we can discard it if it isn't useful. */ #define TARGET_EXPR_IMPLICIT_P(NODE) \ TREE_LANG_FLAG_0 (TARGET_EXPR_CHECK (NODE)) /* True if this TARGET_EXPR is the result of list-initialization of a temporary. */ #define TARGET_EXPR_LIST_INIT_P(NODE) \ TREE_LANG_FLAG_1 (TARGET_EXPR_CHECK (NODE)) /* True if this TARGET_EXPR expresses direct-initialization of an object to be named later. */ #define TARGET_EXPR_DIRECT_INIT_P(NODE) \ TREE_LANG_FLAG_2 (TARGET_EXPR_CHECK (NODE)) /* True if EXPR expresses direct-initialization of a TYPE. */ #define DIRECT_INIT_EXPR_P(TYPE,EXPR) \ (TREE_CODE (EXPR) == TARGET_EXPR && TREE_LANG_FLAG_2 (EXPR) \ && same_type_ignoring_top_level_qualifiers_p (TYPE, TREE_TYPE (EXPR))) /* True if this CONVERT_EXPR is for a conversion to virtual base in an NSDMI, and should be re-evaluated when used in a constructor. */ #define CONVERT_EXPR_VBASE_PATH(NODE) \ TREE_LANG_FLAG_0 (CONVERT_EXPR_CHECK (NODE)) /* An enumeration of the kind of tags that C++ accepts. */ enum tag_types { none_type = 0, /* Not a tag type. */ record_type, /* "struct" types. */ class_type, /* "class" types. */ union_type, /* "union" types. */ enum_type, /* "enum" types. */ typename_type /* "typename" types. */ }; /* The various kinds of lvalues we distinguish. */ enum cp_lvalue_kind_flags { clk_none = 0, /* Things that are not an lvalue. */ clk_ordinary = 1, /* An ordinary lvalue. */ clk_rvalueref = 2,/* An rvalue formed using an rvalue reference */ clk_class = 4, /* An rvalue of class-type. */ clk_bitfield = 8, /* An lvalue for a bit-field. */ clk_packed = 16 /* An lvalue for a packed field. */ }; /* This type is used for parameters and variables which hold combinations of the flags in enum cp_lvalue_kind_flags. 
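   Callers generally test individual bits of such a value rather than
   comparing it for equality, e.g. `(kind & clk_bitfield)' to ask whether an
   expression is a bit-field lvalue.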
*/ typedef int cp_lvalue_kind; /* Various kinds of template specialization, instantiation, etc. */ typedef enum tmpl_spec_kind { tsk_none, /* Not a template at all. */ tsk_invalid_member_spec, /* An explicit member template specialization, but the enclosing classes have not all been explicitly specialized. */ tsk_invalid_expl_inst, /* An explicit instantiation containing template parameter lists. */ tsk_excessive_parms, /* A template declaration with too many template parameter lists. */ tsk_insufficient_parms, /* A template declaration with too few parameter lists. */ tsk_template, /* A template declaration. */ tsk_expl_spec, /* An explicit specialization. */ tsk_expl_inst /* An explicit instantiation. */ } tmpl_spec_kind; /* The various kinds of access. BINFO_ACCESS depends on these being two bit quantities. The numerical values are important; they are used to initialize RTTI data structures, so changing them changes the ABI. */ typedef enum access_kind { ak_none = 0, /* Inaccessible. */ ak_public = 1, /* Accessible, as a `public' thing. */ ak_protected = 2, /* Accessible, as a `protected' thing. */ ak_private = 3 /* Accessible, as a `private' thing. */ } access_kind; /* The various kinds of special functions. If you add to this list, you should update special_function_p as well. */ typedef enum special_function_kind { sfk_none = 0, /* Not a special function. This enumeral must have value zero; see special_function_p. */ sfk_constructor, /* A constructor. */ sfk_copy_constructor, /* A copy constructor. */ sfk_move_constructor, /* A move constructor. */ sfk_copy_assignment, /* A copy assignment operator. */ sfk_move_assignment, /* A move assignment operator. */ sfk_destructor, /* A destructor. */ sfk_complete_destructor, /* A destructor for complete objects. */ sfk_base_destructor, /* A destructor for base subobjects. */ sfk_deleting_destructor, /* A destructor for complete objects that deletes the object after it has been destroyed. */ sfk_conversion /* A conversion operator. */ } special_function_kind; /* The various kinds of linkage. From [basic.link], A name is said to have linkage when it might denote the same object, reference, function, type, template, namespace or value as a name introduced in another scope: -- When a name has external linkage, the entity it denotes can be referred to from scopes of other translation units or from other scopes of the same translation unit. -- When a name has internal linkage, the entity it denotes can be referred to by names from other scopes in the same translation unit. -- When a name has no linkage, the entity it denotes cannot be referred to by names from other scopes. */ typedef enum linkage_kind { lk_none, /* No linkage. */ lk_internal, /* Internal linkage. */ lk_external /* External linkage. */ } linkage_kind; typedef enum duration_kind { dk_static, dk_thread, dk_auto, dk_dynamic } duration_kind; /* Bitmask flags to control type substitution. */ enum tsubst_flags { tf_none = 0, /* nothing special */ tf_error = 1 << 0, /* give error messages */ tf_warning = 1 << 1, /* give warnings too */ tf_ignore_bad_quals = 1 << 2, /* ignore bad cvr qualifiers */ tf_keep_type_decl = 1 << 3, /* retain typedef type decls (make_typename_type use) */ tf_ptrmem_ok = 1 << 4, /* pointers to member ok (internal instantiate_type use) */ tf_user = 1 << 5, /* found template must be a user template (lookup_template_class use) */ tf_conv = 1 << 6, /* We are determining what kind of conversion might be permissible, not actually performing the conversion. 
*/ tf_no_access_control = 1 << 7, /* Do not perform access checks, even when issuing other errors. */ /* Convenient substitution flags combinations. */ tf_warning_or_error = tf_warning | tf_error }; /* This type is used for parameters and variables which hold combinations of the flags in enum tsubst_flags. */ typedef int tsubst_flags_t; /* The kind of checking we can do looking in a class hierarchy. */ enum base_access_flags { ba_any = 0, /* Do not check access, allow an ambiguous base, prefer a non-virtual base */ ba_unique = 1 << 0, /* Must be a unique base. */ ba_check_bit = 1 << 1, /* Check access. */ ba_check = ba_unique | ba_check_bit, ba_ignore_scope = 1 << 2, /* Ignore access allowed by local scope. */ ba_quiet = 1 << 3 /* Do not issue error messages. */ }; /* This type is used for parameters and variables which hold combinations of the flags in enum base_access_flags. */ typedef int base_access; /* The various kinds of access check during parsing. */ typedef enum deferring_kind { dk_no_deferred = 0, /* Check access immediately */ dk_deferred = 1, /* Deferred check */ dk_no_check = 2 /* No access check */ } deferring_kind; /* The kind of base we can find, looking in a class hierarchy. Values <0 indicate we failed. */ typedef enum base_kind { bk_inaccessible = -3, /* The base is inaccessible */ bk_ambig = -2, /* The base is ambiguous */ bk_not_base = -1, /* It is not a base */ bk_same_type = 0, /* It is the same type */ bk_proper_base = 1, /* It is a proper base */ bk_via_virtual = 2 /* It is a proper base, but via a virtual path. This might not be the canonical binfo. */ } base_kind; /* Node for "pointer to (virtual) function". This may be distinct from ptr_type_node so gdb can distinguish them. */ #define vfunc_ptr_type_node vtable_entry_type /* For building calls to `delete'. */ extern GTY(()) tree integer_two_node; /* The number of function bodies which we are currently processing. (Zero if we are at namespace scope, one inside the body of a function, two inside the body of a function in a local class, etc.) */ extern int function_depth; /* In parser.c. */ /* Nonzero if we are parsing an unevaluated operand: an operand to sizeof, typeof, or alignof. This is a count since operands to sizeof can be nested. */ extern int cp_unevaluated_operand; extern tree cp_convert_range_for (tree, tree, tree); /* in pt.c */ /* These values are used for the `STRICT' parameter to type_unification and fn_type_unification. Their meanings are described with the documentation for fn_type_unification. */ typedef enum unification_kind_t { DEDUCE_CALL, DEDUCE_CONV, DEDUCE_EXACT } unification_kind_t; /* in class.c */ extern int current_class_depth; /* An array of all local classes present in this translation unit, in declaration order. */ extern GTY(()) VEC(tree,gc) *local_classes; /* Here's where we control how name mangling takes place. */ /* Cannot use '$' up front, because this confuses gdb (names beginning with '$' are gdb-local identifiers). Note that all forms in which the '$' is significant are long enough for direct indexing (meaning that if we know there is a '$' at a particular location, we can index into the string at any other location that provides distinguishing characters). */ /* Define NO_DOT_IN_LABEL in your favorite tm file if your assembler doesn't allow '.' in symbol names. */ #ifndef NO_DOT_IN_LABEL #define JOINER '.' #define AUTO_TEMP_NAME "_.tmp_" #define VFIELD_BASE ".vf" #define VFIELD_NAME "_vptr." 
#define VFIELD_NAME_FORMAT "_vptr.%s" #define ANON_AGGRNAME_FORMAT "._%d" #else /* NO_DOT_IN_LABEL */ #ifndef NO_DOLLAR_IN_LABEL #define JOINER '$' #define AUTO_TEMP_NAME "_$tmp_" #define VFIELD_BASE "$vf" #define VFIELD_NAME "_vptr$" #define VFIELD_NAME_FORMAT "_vptr$%s" #define ANON_AGGRNAME_FORMAT "$_%d" #else /* NO_DOLLAR_IN_LABEL */ #define IN_CHARGE_NAME "__in_chrg" #define AUTO_TEMP_NAME "__tmp_" #define TEMP_NAME_P(ID_NODE) \ (!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, \ sizeof (AUTO_TEMP_NAME) - 1)) #define VTABLE_NAME "__vt_" #define VTABLE_NAME_P(ID_NODE) \ (!strncmp (IDENTIFIER_POINTER (ID_NODE), VTABLE_NAME, \ sizeof (VTABLE_NAME) - 1)) #define VFIELD_BASE "__vfb" #define VFIELD_NAME "__vptr_" #define VFIELD_NAME_P(ID_NODE) \ (!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, \ sizeof (VFIELD_NAME) - 1)) #define VFIELD_NAME_FORMAT "__vptr_%s" #define ANON_AGGRNAME_PREFIX "__anon_" #define ANON_AGGRNAME_P(ID_NODE) \ (!strncmp (IDENTIFIER_POINTER (ID_NODE), ANON_AGGRNAME_PREFIX, \ sizeof (ANON_AGGRNAME_PREFIX) - 1)) #define ANON_AGGRNAME_FORMAT "__anon_%d" #endif /* NO_DOLLAR_IN_LABEL */ #endif /* NO_DOT_IN_LABEL */ #define THIS_NAME "this" #define IN_CHARGE_NAME "__in_chrg" #define VTBL_PTR_TYPE "__vtbl_ptr_type" #define VTABLE_DELTA_NAME "__delta" #define VTABLE_PFN_NAME "__pfn" #define LAMBDANAME_PREFIX "__lambda" #define LAMBDANAME_FORMAT LAMBDANAME_PREFIX "%d" #define LAMBDANAME_P(ID_NODE) \ (!strncmp (IDENTIFIER_POINTER (ID_NODE), \ LAMBDANAME_PREFIX, \ sizeof (LAMBDANAME_PREFIX) - 1)) #define UDLIT_OP_ANSI_PREFIX "operator\"\" " #define UDLIT_OP_ANSI_FORMAT UDLIT_OP_ANSI_PREFIX "%s" #define UDLIT_OP_MANGLED_PREFIX "li" #define UDLIT_OP_MANGLED_FORMAT UDLIT_OP_MANGLED_PREFIX "%s" #define UDLIT_OPER_P(ID_NODE) \ (!strncmp (IDENTIFIER_POINTER (ID_NODE), \ UDLIT_OP_ANSI_PREFIX, \ sizeof (UDLIT_OP_ANSI_PREFIX) - 1)) #define UDLIT_OP_SUFFIX(ID_NODE) \ (IDENTIFIER_POINTER (ID_NODE) + sizeof (UDLIT_OP_ANSI_PREFIX) - 1) #if !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL) #define VTABLE_NAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[1] == 'v' \ && IDENTIFIER_POINTER (ID_NODE)[2] == 't' \ && IDENTIFIER_POINTER (ID_NODE)[3] == JOINER) #define TEMP_NAME_P(ID_NODE) \ (!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, sizeof (AUTO_TEMP_NAME)-1)) #define VFIELD_NAME_P(ID_NODE) \ (!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, sizeof(VFIELD_NAME)-1)) /* For anonymous aggregate types, we need some sort of name to hold on to. In practice, this should not appear, but it should not be harmful if it does. */ #define ANON_AGGRNAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[0] == JOINER \ && IDENTIFIER_POINTER (ID_NODE)[1] == '_') #endif /* !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL) */ /* Nonzero if we're done parsing and into end-of-file activities. */ extern int at_eof; /* A list of namespace-scope objects which have constructors or destructors which reside in the global scope. The decl is stored in the TREE_VALUE slot and the initializer is stored in the TREE_PURPOSE slot. */ extern GTY(()) tree static_aggregates; enum overload_flags { NO_SPECIAL = 0, DTOR_FLAG, TYPENAME_FLAG }; /* These are uses as bits in flags passed to various functions to control their behavior. Despite the LOOKUP_ prefix, many of these do not control name lookup. ??? Functions using these flags should probably be modified to accept explicit boolean flags for the behaviors relevant to them. */ /* Check for access violations. 
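   For example, with this bit set, a call to a private member function from
   outside its class is diagnosed instead of being silently accepted.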
*/ #define LOOKUP_PROTECT (1 << 0) /* Complain if no suitable member function matching the arguments is found. */ #define LOOKUP_COMPLAIN (1 << 1) #define LOOKUP_NORMAL (LOOKUP_PROTECT | LOOKUP_COMPLAIN) /* Even if the function found by lookup is a virtual function, it should be called directly. */ #define LOOKUP_NONVIRTUAL (1 << 2) /* Non-converting (i.e., "explicit") constructors are not tried. This flag indicates that we are not performing direct-initialization. */ #define LOOKUP_ONLYCONVERTING (1 << 3) #define LOOKUP_IMPLICIT (LOOKUP_NORMAL | LOOKUP_ONLYCONVERTING) /* If a temporary is created, it should be created so that it lives as long as the current variable bindings; otherwise it only lives until the end of the complete-expression. It also forces direct-initialization in cases where other parts of the compiler have already generated a temporary, such as reference initialization and the catch parameter. */ #define DIRECT_BIND (1 << 4) /* We're performing a user-defined conversion, so more user-defined conversions are not permitted (only built-in conversions). */ #define LOOKUP_NO_CONVERSION (1 << 5) /* The user has explicitly called a destructor. (Therefore, we do not need to check that the object is non-NULL before calling the destructor.) */ #define LOOKUP_DESTRUCTOR (1 << 6) /* Do not permit references to bind to temporaries. */ #define LOOKUP_NO_TEMP_BIND (1 << 7) /* Do not accept objects, and possibly namespaces. */ #define LOOKUP_PREFER_TYPES (1 << 8) /* Do not accept objects, and possibly types. */ #define LOOKUP_PREFER_NAMESPACES (1 << 9) /* Accept types or namespaces. */ #define LOOKUP_PREFER_BOTH (LOOKUP_PREFER_TYPES | LOOKUP_PREFER_NAMESPACES) /* Return friend declarations and un-declared builtin functions. (Normally, these entities are registered in the symbol table, but not found by lookup.) */ #define LOOKUP_HIDDEN (LOOKUP_PREFER_NAMESPACES << 1) /* Prefer that the lvalue be treated as an rvalue. */ #define LOOKUP_PREFER_RVALUE (LOOKUP_HIDDEN << 1) /* We're inside an init-list, so narrowing conversions are ill-formed. */ #define LOOKUP_NO_NARROWING (LOOKUP_PREFER_RVALUE << 1) /* We're looking up a constructor for list-initialization. */ #define LOOKUP_LIST_INIT_CTOR (LOOKUP_NO_NARROWING << 1) /* This is the first parameter of a copy constructor. */ #define LOOKUP_COPY_PARM (LOOKUP_LIST_INIT_CTOR << 1) /* We only want to consider list constructors. */ #define LOOKUP_LIST_ONLY (LOOKUP_COPY_PARM << 1) /* Return after determining which function to call and checking access. Used by sythesized_method_walk to determine which functions will be called to initialize subobjects, in order to determine exception specification and possible implicit delete. This is kind of a hack, but since access control doesn't respect SFINAE we can't just use tf_none to avoid access control errors, we need another mechanism. Exiting early also avoids problems with trying to perform argument conversions when the class isn't complete yet. */ #define LOOKUP_SPECULATIVE (LOOKUP_LIST_ONLY << 1) /* Used by calls from defaulted functions to limit the overload set to avoid cycles trying to declare them (core issue 1092). */ #define LOOKUP_DEFAULTED (LOOKUP_SPECULATIVE << 1) /* Used in calls to store_init_value to suppress its usual call to digest_init. */ #define LOOKUP_ALREADY_DIGESTED (LOOKUP_DEFAULTED << 1) /* An instantiation with explicit template arguments. */ #define LOOKUP_EXPLICIT_TMPL_ARGS (LOOKUP_ALREADY_DIGESTED << 1) /* Like LOOKUP_NO_TEMP_BIND, but also prevent binding to xvalues. 
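   (An xvalue is, for example, the result of calling a function whose return
   type is an rvalue reference, or of a cast to an rvalue reference type.)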
*/ #define LOOKUP_NO_RVAL_BIND (LOOKUP_EXPLICIT_TMPL_ARGS << 1) #define LOOKUP_NAMESPACES_ONLY(F) \ (((F) & LOOKUP_PREFER_NAMESPACES) && !((F) & LOOKUP_PREFER_TYPES)) #define LOOKUP_TYPES_ONLY(F) \ (!((F) & LOOKUP_PREFER_NAMESPACES) && ((F) & LOOKUP_PREFER_TYPES)) #define LOOKUP_QUALIFIERS_ONLY(F) ((F) & LOOKUP_PREFER_BOTH) /* These flags are used by the conversion code. CONV_IMPLICIT : Perform implicit conversions (standard and user-defined). CONV_STATIC : Perform the explicit conversions for static_cast. CONV_CONST : Perform the explicit conversions for const_cast. CONV_REINTERPRET: Perform the explicit conversions for reinterpret_cast. CONV_PRIVATE : Perform upcasts to private bases. CONV_FORCE_TEMP : Require a new temporary when converting to the same aggregate type. */ #define CONV_IMPLICIT 1 #define CONV_STATIC 2 #define CONV_CONST 4 #define CONV_REINTERPRET 8 #define CONV_PRIVATE 16 /* #define CONV_NONCONVERTING 32 */ #define CONV_FORCE_TEMP 64 #define CONV_OLD_CONVERT (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \ | CONV_REINTERPRET) #define CONV_C_CAST (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \ | CONV_REINTERPRET | CONV_PRIVATE | CONV_FORCE_TEMP) /* Used by build_expr_type_conversion to indicate which types are acceptable as arguments to the expression under consideration. */ #define WANT_INT 1 /* integer types, including bool */ #define WANT_FLOAT 2 /* floating point types */ #define WANT_ENUM 4 /* enumerated types */ #define WANT_POINTER 8 /* pointer types */ #define WANT_NULL 16 /* null pointer constant */ #define WANT_VECTOR_OR_COMPLEX 32 /* vector or complex types */ #define WANT_ARITH (WANT_INT | WANT_FLOAT | WANT_VECTOR_OR_COMPLEX) /* Used with comptypes, and related functions, to guide type comparison. */ #define COMPARE_STRICT 0 /* Just check if the types are the same. */ #define COMPARE_BASE 1 /* Check to see if the second type is derived from the first. */ #define COMPARE_DERIVED 2 /* Like COMPARE_BASE, but in reverse. */ #define COMPARE_REDECLARATION 4 /* The comparison is being done when another declaration of an existing entity is seen. */ #define COMPARE_STRUCTURAL 8 /* The comparison is intended to be structural. The actual comparison will be identical to COMPARE_STRICT. */ /* Used with push_overloaded_decl. */ #define PUSH_GLOBAL 0 /* Push the DECL into namespace scope, regardless of the current scope. */ #define PUSH_LOCAL 1 /* Push the DECL into the current scope. */ #define PUSH_USING 2 /* We are pushing this DECL as the result of a using declaration. */ /* Used with start function. */ #define SF_DEFAULT 0 /* No flags. */ #define SF_PRE_PARSED 1 /* The function declaration has already been parsed. */ #define SF_INCLASS_INLINE 2 /* The function is an inline, defined in the class body. */ /* Used with start_decl's initialized parameter. */ #define SD_UNINITIALIZED 0 #define SD_INITIALIZED 1 #define SD_DEFAULTED 2 #define SD_DELETED 3 /* Returns nonzero iff TYPE1 and TYPE2 are the same type, or if TYPE2 is derived from TYPE1, or if TYPE2 is a pointer (reference) to a class derived from the type pointed to (referred to) by TYPE1. */ #define same_or_base_type_p(TYPE1, TYPE2) \ comptypes ((TYPE1), (TYPE2), COMPARE_BASE) /* These macros are used to access a TEMPLATE_PARM_INDEX. 
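   For example, for the parameter T in `template <class T> struct S { };',
   TEMPLATE_PARM_LEVEL is 1 (the outermost level) and TEMPLATE_PARM_IDX is 0,
   T being the first parameter at that level.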
*/ #define TEMPLATE_PARM_INDEX_CAST(NODE) \ ((template_parm_index*)TEMPLATE_PARM_INDEX_CHECK (NODE)) #define TEMPLATE_PARM_IDX(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->index) #define TEMPLATE_PARM_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->level) /* The Number of sibling parms this template parm has. */ #define TEMPLATE_PARM_NUM_SIBLINGS(NODE) \ (TEMPLATE_PARM_INDEX_CAST (NODE)->num_siblings) #define TEMPLATE_PARM_DESCENDANTS(NODE) (TREE_CHAIN (NODE)) #define TEMPLATE_PARM_ORIG_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->orig_level) #define TEMPLATE_PARM_DECL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->decl) #define TEMPLATE_PARM_PARAMETER_PACK(NODE) \ (TREE_LANG_FLAG_0 (TEMPLATE_PARM_INDEX_CHECK (NODE))) /* These macros are for accessing the fields of TEMPLATE_TYPE_PARM, TEMPLATE_TEMPLATE_PARM and BOUND_TEMPLATE_TEMPLATE_PARM nodes. */ #define TEMPLATE_TYPE_PARM_INDEX(NODE) \ (TYPE_VALUES_RAW (TREE_CHECK3 ((NODE), TEMPLATE_TYPE_PARM, \ TEMPLATE_TEMPLATE_PARM, \ BOUND_TEMPLATE_TEMPLATE_PARM))) #define TEMPLATE_TYPE_IDX(NODE) \ (TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (NODE))) #define TEMPLATE_TYPE_LEVEL(NODE) \ (TEMPLATE_PARM_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE))) #define TEMPLATE_TYPE_ORIG_LEVEL(NODE) \ (TEMPLATE_PARM_ORIG_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE))) #define TEMPLATE_TYPE_DECL(NODE) \ (TEMPLATE_PARM_DECL (TEMPLATE_TYPE_PARM_INDEX (NODE))) #define TEMPLATE_TYPE_PARAMETER_PACK(NODE) \ (TEMPLATE_PARM_PARAMETER_PACK (TEMPLATE_TYPE_PARM_INDEX (NODE))) /* These constants can used as bit flags in the process of tree formatting. TFF_PLAIN_IDENTIFIER: unqualified part of a name. TFF_SCOPE: include the class and namespace scope of the name. TFF_CHASE_TYPEDEF: print the original type-id instead of the typedef-name. TFF_DECL_SPECIFIERS: print decl-specifiers. TFF_CLASS_KEY_OR_ENUM: precede a class-type name (resp. enum name) with a class-key (resp. `enum'). TFF_RETURN_TYPE: include function return type. TFF_FUNCTION_DEFAULT_ARGUMENTS: include function default parameter values. TFF_EXCEPTION_SPECIFICATION: show function exception specification. TFF_TEMPLATE_HEADER: show the template<...> header in a template-declaration. TFF_TEMPLATE_NAME: show only template-name. TFF_EXPR_IN_PARENS: parenthesize expressions. TFF_NO_FUNCTION_ARGUMENTS: don't show function arguments. TFF_UNQUALIFIED_NAME: do not print the qualifying scope of the top-level entity. TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS: do not omit template arguments identical to their defaults. */ #define TFF_PLAIN_IDENTIFIER (0) #define TFF_SCOPE (1) #define TFF_CHASE_TYPEDEF (1 << 1) #define TFF_DECL_SPECIFIERS (1 << 2) #define TFF_CLASS_KEY_OR_ENUM (1 << 3) #define TFF_RETURN_TYPE (1 << 4) #define TFF_FUNCTION_DEFAULT_ARGUMENTS (1 << 5) #define TFF_EXCEPTION_SPECIFICATION (1 << 6) #define TFF_TEMPLATE_HEADER (1 << 7) #define TFF_TEMPLATE_NAME (1 << 8) #define TFF_EXPR_IN_PARENS (1 << 9) #define TFF_NO_FUNCTION_ARGUMENTS (1 << 10) #define TFF_UNQUALIFIED_NAME (1 << 11) #define TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS (1 << 12) /* Returns the TEMPLATE_DECL associated to a TEMPLATE_TEMPLATE_PARM node. */ #define TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL(NODE) \ ((TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM) \ ? TYPE_TI_TEMPLATE (NODE) \ : TYPE_NAME (NODE)) /* in lex.c */ extern void init_reswords (void); typedef struct GTY(()) operator_name_info_t { /* The IDENTIFIER_NODE for the operator. */ tree identifier; /* The name of the operator. */ const char *name; /* The mangled name of the operator. 
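   For instance, under the Itanium C++ ABI `operator+' is encoded as `pl'.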
*/ const char *mangled_name; /* The arity of the operator. */ int arity; } operator_name_info_t; /* A mapping from tree codes to operator name information. */ extern GTY(()) operator_name_info_t operator_name_info [(int) MAX_TREE_CODES]; /* Similar, but for assignment operators. */ extern GTY(()) operator_name_info_t assignment_operator_name_info [(int) MAX_TREE_CODES]; /* A type-qualifier, or bitmask thereof, using the TYPE_QUAL constants. */ typedef int cp_cv_quals; /* Non-static member functions have an optional virt-specifier-seq. There is a VIRT_SPEC value for each virt-specifier. They can be combined by bitwise-or to form the complete set of virt-specifiers for a member function. */ enum virt_specifier { VIRT_SPEC_UNSPECIFIED = 0x0, VIRT_SPEC_FINAL = 0x1, VIRT_SPEC_OVERRIDE = 0x2 }; /* A virt-specifier, or bitmask thereof, using the VIRT_SPEC constants. */ typedef int cp_virt_specifiers; /* A storage class. */ typedef enum cp_storage_class { /* sc_none must be zero so that zeroing a cp_decl_specifier_seq sets the storage_class field to sc_none. */ sc_none = 0, sc_auto, sc_register, sc_static, sc_extern, sc_mutable } cp_storage_class; /* An individual decl-specifier. */ typedef enum cp_decl_spec { ds_first, ds_signed = ds_first, ds_unsigned, ds_short, ds_long, ds_const, ds_volatile, ds_restrict, ds_inline, ds_virtual, ds_explicit, ds_friend, ds_typedef, ds_alias, ds_constexpr, ds_complex, ds_thread, ds_last } cp_decl_spec; /* A decl-specifier-seq. */ typedef struct cp_decl_specifier_seq { /* The number of times each of the keywords has been seen. */ unsigned specs[(int) ds_last]; /* The location of the primary type. Mainly used for error reporting. */ location_t type_location; /* The primary type, if any, given by the decl-specifier-seq. Modifiers, like "short", "const", and "unsigned" are not reflected here. This field will be a TYPE, unless a typedef-name was used, in which case it will be a TYPE_DECL. */ tree type; /* The attributes, if any, provided with the specifier sequence. */ tree attributes; /* If non-NULL, a built-in type that the user attempted to redefine to some other type. */ tree redefined_builtin_type; /* The storage class specified -- or sc_none if no storage class was explicitly specified. */ cp_storage_class storage_class; /* True iff TYPE_SPEC defines a class or enum. */ BOOL_BITFIELD type_definition_p : 1; /* True iff multiple types were (erroneously) specified for this decl-specifier-seq. */ BOOL_BITFIELD multiple_types_p : 1; /* True iff multiple storage classes were (erroneously) specified for this decl-specifier-seq or a combination of a storage class with a typedef specifier. */ BOOL_BITFIELD conflicting_specifiers_p : 1; /* True iff at least one decl-specifier was found. */ BOOL_BITFIELD any_specifiers_p : 1; /* True iff at least one type-specifier was found. */ BOOL_BITFIELD any_type_specifiers_p : 1; /* True iff "int" was explicitly provided. */ BOOL_BITFIELD explicit_int_p : 1; /* True iff "__int128" was explicitly provided. */ BOOL_BITFIELD explicit_int128_p : 1; /* True iff "char" was explicitly provided. */ BOOL_BITFIELD explicit_char_p : 1; } cp_decl_specifier_seq; /* The various kinds of declarators. */ typedef enum cp_declarator_kind { cdk_id, cdk_function, cdk_array, cdk_pointer, cdk_reference, cdk_ptrmem, cdk_error } cp_declarator_kind; /* A declarator. */ typedef struct cp_declarator cp_declarator; typedef struct cp_parameter_declarator cp_parameter_declarator; /* A parameter, before it has been semantically analyzed.
*/ struct cp_parameter_declarator { /* The next parameter, or NULL_TREE if none. */ cp_parameter_declarator *next; /* The decl-specifiers-seq for the parameter. */ cp_decl_specifier_seq decl_specifiers; /* The declarator for the parameter. */ cp_declarator *declarator; /* The default-argument expression, or NULL_TREE, if none. */ tree default_argument; /* True iff this is the first parameter in the list and the parameter sequence ends with an ellipsis. */ bool ellipsis_p; }; /* A declarator. */ struct cp_declarator { /* The kind of declarator. */ ENUM_BITFIELD (cp_declarator_kind) kind : 4; /* Whether we parsed an ellipsis (`...') just before the declarator, to indicate this is a parameter pack. */ BOOL_BITFIELD parameter_pack_p : 1; location_t id_loc; /* Currently only set for cdk_id and cdk_function. */ /* Attributes that apply to this declarator. */ tree attributes; /* For all but cdk_id and cdk_error, the contained declarator. For cdk_id and cdk_error, guaranteed to be NULL. */ cp_declarator *declarator; union { /* For identifiers. */ struct { /* If non-NULL, the qualifying scope (a NAMESPACE_DECL or *_TYPE) for this identifier. */ tree qualifying_scope; /* The unqualified name of the entity -- an IDENTIFIER_NODE, BIT_NOT_EXPR, or TEMPLATE_ID_EXPR. */ tree unqualified_name; /* If this is the name of a function, what kind of special function (if any). */ special_function_kind sfk; } id; /* For functions. */ struct { /* The parameters to the function as a TREE_LIST of decl/default. */ tree parameters; /* The cv-qualifiers for the function. */ cp_cv_quals qualifiers; /* The virt-specifiers for the function. */ cp_virt_specifiers virt_specifiers; /* The exception-specification for the function. */ tree exception_specification; /* The late-specified return type, if any. */ tree late_return_type; } function; /* For arrays. */ struct { /* The bounds to the array. */ tree bounds; } array; /* For cdk_pointer and cdk_ptrmem. */ struct { /* The cv-qualifiers for the pointer. */ cp_cv_quals qualifiers; /* For cdk_ptrmem, the class type containing the member. */ tree class_type; } pointer; /* For cdk_reference */ struct { /* The cv-qualifiers for the reference. These qualifiers are only used to diagnose ill-formed code. */ cp_cv_quals qualifiers; /* Whether this is an rvalue reference */ bool rvalue_ref; } reference; } u; }; /* A level of template instantiation. */ struct GTY((chain_next ("%h.next"))) tinst_level { /* The immediately deeper level in the chain. */ struct tinst_level *next; /* The original node. Can be either a DECL (for a function or static data member) or a TYPE (for a class), depending on what we were asked to instantiate. */ tree decl; /* The location where the template is instantiated. */ location_t locus; /* errorcount+sorrycount when we pushed this level. */ int errors; /* True if the location is in a system header. */ bool in_system_header_p; }; /* Return the type of the `this' parameter of FNTYPE. */ static inline tree type_of_this_parm (const_tree fntype) { function_args_iterator iter; gcc_assert (TREE_CODE (fntype) == METHOD_TYPE); function_args_iter_init (&iter, fntype); return function_args_iter_cond (&iter); } /* Return the class of the `this' parameter of FNTYPE. */ static inline tree class_of_this_parm (const_tree fntype) { return TREE_TYPE (type_of_this_parm (fntype)); } /* A parameter list indicating for a function with no parameters, e.g "int f(void)". */ extern cp_parameter_declarator *no_parameters; /* True if we saw "#pragma GCC java_exceptions". 
*/ extern bool pragma_java_exceptions; /* in call.c */ extern bool check_dtor_name (tree, tree); extern tree build_conditional_expr (tree, tree, tree, tsubst_flags_t); extern tree build_addr_func (tree); extern void set_flags_from_callee (tree); extern tree build_call_a (tree, int, tree*); extern tree build_call_n (tree, int, ...); extern bool null_ptr_cst_p (tree); extern bool null_member_pointer_value_p (tree); extern bool sufficient_parms_p (const_tree); extern tree type_decays_to (tree); extern tree build_user_type_conversion (tree, tree, int); extern tree build_new_function_call (tree, VEC(tree,gc) **, bool, tsubst_flags_t); extern tree build_operator_new_call (tree, VEC(tree,gc) **, tree *, tree *, tree *); extern tree build_new_method_call (tree, tree, VEC(tree,gc) **, tree, int, tree *, tsubst_flags_t); extern tree build_special_member_call (tree, tree, VEC(tree,gc) **, tree, int, tsubst_flags_t); extern tree build_new_op (enum tree_code, int, tree, tree, tree, tree *, tsubst_flags_t); extern tree build_op_call (tree, VEC(tree,gc) **, tsubst_flags_t); extern tree build_op_delete_call (enum tree_code, tree, tree, bool, tree, tree); extern bool can_convert (tree, tree); extern bool can_convert_arg (tree, tree, tree, int); extern bool can_convert_arg_bad (tree, tree, tree, int); extern bool enforce_access (tree, tree, tree); extern void push_defarg_context (tree); extern void pop_defarg_context (void); extern tree convert_default_arg (tree, tree, tree, int); extern tree convert_arg_to_ellipsis (tree); extern tree build_x_va_arg (tree, tree); extern tree cxx_type_promotes_to (tree); extern tree type_passed_as (tree); extern tree convert_for_arg_passing (tree, tree); extern bool is_properly_derived_from (tree, tree); extern tree initialize_reference (tree, tree, int, tsubst_flags_t); extern tree extend_ref_init_temps (tree, tree, VEC(tree,gc)**); extern tree make_temporary_var_for_ref_to_temp (tree, tree); extern tree strip_top_quals (tree); extern bool reference_related_p (tree, tree); extern tree perform_implicit_conversion (tree, tree, tsubst_flags_t); extern tree perform_implicit_conversion_flags (tree, tree, tsubst_flags_t, int); extern tree build_integral_nontype_arg_conv (tree, tree, tsubst_flags_t); extern tree perform_direct_initialization_if_possible (tree, tree, bool, tsubst_flags_t); extern tree in_charge_arg_for_name (tree); extern tree build_cxx_call (tree, int, tree *); extern bool is_std_init_list (tree); extern bool is_list_ctor (tree); #ifdef ENABLE_CHECKING extern void validate_conversion_obstack (void); #endif /* ENABLE_CHECKING */ /* in class.c */ extern tree build_vfield_ref (tree, tree); extern tree build_base_path (enum tree_code, tree, tree, int, tsubst_flags_t); extern tree convert_to_base (tree, tree, bool, bool, tsubst_flags_t); extern tree convert_to_base_statically (tree, tree); extern tree build_vtbl_ref (tree, tree); extern tree build_vfn_ref (tree, tree); extern tree get_vtable_decl (tree, int); extern void resort_type_method_vec (void *, void *, gt_pointer_operator, void *); extern bool add_method (tree, tree, tree); extern bool currently_open_class (tree); extern tree currently_open_derived_class (tree); extern tree current_nonlambda_class_type (void); extern tree finish_struct (tree, tree); extern void finish_struct_1 (tree); extern int resolves_to_fixed_type_p (tree, int *); extern void init_class_processing (void); extern int is_empty_class (tree); extern bool is_really_empty_class (tree); extern void pushclass (tree); extern void popclass (void); 
extern void push_nested_class (tree); extern void pop_nested_class (void); extern int current_lang_depth (void); extern void push_lang_context (tree); extern void pop_lang_context (void); extern tree instantiate_type (tree, tree, tsubst_flags_t); extern void print_class_statistics (void); extern void build_self_reference (void); extern int same_signature_p (const_tree, const_tree); extern void maybe_add_class_template_decl_list (tree, tree, int); extern void unreverse_member_declarations (tree); extern void invalidate_class_lookup_cache (void); extern void maybe_note_name_used_in_class (tree, tree); extern void note_name_declared_in_class (tree, tree); extern tree get_vtbl_decl_for_binfo (tree); extern void debug_class (tree); extern void debug_thunks (tree); extern void set_linkage_according_to_type (tree, tree); extern void determine_key_method (tree); extern void check_for_override (tree, tree); extern void push_class_stack (void); extern void pop_class_stack (void); extern bool type_has_user_nondefault_constructor (tree); extern tree in_class_defaulted_default_constructor (tree); extern bool user_provided_p (tree); extern bool type_has_user_provided_constructor (tree); extern bool type_has_user_provided_default_constructor (tree); extern tree default_init_uninitialized_part (tree); extern bool trivial_default_constructor_is_constexpr (tree); extern bool type_has_constexpr_default_constructor (tree); extern bool type_has_virtual_destructor (tree); extern bool type_has_move_constructor (tree); extern bool type_has_move_assign (tree); extern bool type_has_user_declared_move_constructor (tree); extern bool type_has_user_declared_move_assign(tree); extern bool type_build_ctor_call (tree); extern void explain_non_literal_class (tree); extern void defaulted_late_check (tree); extern bool defaultable_fn_check (tree); extern void fixup_type_variants (tree); extern void fixup_attribute_variants (tree); extern tree* decl_cloned_function_p (const_tree, bool); extern void clone_function_decl (tree, int); extern void adjust_clone_args (tree); extern void insert_late_enum_def_into_classtype_sorted_fields (tree, tree); /* in cvt.c */ extern tree convert_to_reference (tree, tree, int, int, tree); extern tree convert_from_reference (tree); extern tree force_rvalue (tree, tsubst_flags_t); extern tree ocp_convert (tree, tree, int, int); extern tree cp_convert (tree, tree); extern tree cp_convert_and_check (tree, tree); extern tree cp_fold_convert (tree, tree); extern tree convert_to_void (tree, impl_conv_void, tsubst_flags_t); extern tree convert_force (tree, tree, int); extern tree build_expr_type_conversion (int, tree, bool); extern tree type_promotes_to (tree); extern tree perform_qualification_conversions (tree, tree); /* in name-lookup.c */ extern tree pushdecl (tree); extern tree pushdecl_maybe_friend (tree, bool); extern void maybe_push_cleanup_level (tree); extern tree pushtag (tree, tree, tag_scope); extern tree make_anon_name (void); extern tree pushdecl_top_level_maybe_friend (tree, bool); extern tree pushdecl_top_level_and_finish (tree, tree); extern tree check_for_out_of_scope_variable (tree); extern void print_other_binding_stack (cp_binding_level *); extern tree maybe_push_decl (tree); extern tree current_decl_namespace (void); /* decl.c */ extern tree poplevel (int, int, int); extern void cxx_init_decl_processing (void); enum cp_tree_node_structure_enum cp_tree_node_structure (union lang_tree_node *); extern void finish_scope (void); extern void push_switch (tree); extern void pop_switch 
(void); extern tree make_lambda_name (void); extern int decls_match (tree, tree); extern tree duplicate_decls (tree, tree, bool); extern tree declare_local_label (tree); extern tree define_label (location_t, tree); extern void check_goto (tree); extern bool check_omp_return (void); extern tree make_typename_type (tree, tree, enum tag_types, tsubst_flags_t); extern tree make_unbound_class_template (tree, tree, tree, tsubst_flags_t); extern tree build_library_fn_ptr (const char *, tree); extern tree build_cp_library_fn_ptr (const char *, tree); extern tree push_library_fn (tree, tree, tree); extern tree push_void_library_fn (tree, tree); extern tree push_throw_library_fn (tree, tree); extern tree check_tag_decl (cp_decl_specifier_seq *); extern tree shadow_tag (cp_decl_specifier_seq *); extern tree groktypename (cp_decl_specifier_seq *, const cp_declarator *, bool); extern tree start_decl (const cp_declarator *, cp_decl_specifier_seq *, int, tree, tree, tree *); extern void start_decl_1 (tree, bool); extern bool check_array_initializer (tree, tree, tree); extern void cp_finish_decl (tree, tree, bool, tree, int); extern int cp_complete_array_type (tree *, tree, bool); extern int cp_complete_array_type_or_error (tree *, tree, bool, tsubst_flags_t); extern tree build_ptrmemfunc_type (tree); extern tree build_ptrmem_type (tree, tree); /* the grokdeclarator prototype is in decl.h */ extern tree build_this_parm (tree, cp_cv_quals); extern int copy_fn_p (const_tree); extern bool move_fn_p (const_tree); extern bool move_signature_fn_p (const_tree); extern tree get_scope_of_declarator (const cp_declarator *); extern void grok_special_member_properties (tree); extern int grok_ctor_properties (const_tree, const_tree); extern bool grok_op_properties (tree, bool); extern tree xref_tag (enum tag_types, tree, tag_scope, bool); extern tree xref_tag_from_type (tree, tree, tag_scope); extern bool xref_basetypes (tree, tree); extern tree start_enum (tree, tree, tree, bool, bool *); extern void finish_enum_value_list (tree); extern void finish_enum (tree); extern void build_enumerator (tree, tree, tree, location_t); extern tree lookup_enumerator (tree, tree); extern void start_preparsed_function (tree, tree, int); extern int start_function (cp_decl_specifier_seq *, const cp_declarator *, tree); extern tree begin_function_body (void); extern void finish_function_body (tree); extern tree outer_curly_brace_block (tree); extern tree finish_function (int); extern tree grokmethod (cp_decl_specifier_seq *, const cp_declarator *, tree); extern void maybe_register_incomplete_var (tree); extern void maybe_commonize_var (tree); extern void complete_vars (tree); extern void finish_stmt (void); extern tree static_fn_type (tree); extern void revert_static_member_fn (tree); extern void fixup_anonymous_aggr (tree); extern tree compute_array_index_type (tree, tree, tsubst_flags_t); extern tree check_default_argument (tree, tree); typedef int (*walk_namespaces_fn) (tree, void *); extern int walk_namespaces (walk_namespaces_fn, void *); extern int wrapup_globals_for_namespace (tree, void *); extern tree create_implicit_typedef (tree, tree); extern int local_variable_p (const_tree); extern tree register_dtor_fn (tree); extern tmpl_spec_kind current_tmpl_spec_kind (int); extern tree cp_fname_init (const char *, tree *); extern tree cxx_builtin_function (tree decl); extern tree cxx_builtin_function_ext_scope (tree decl); extern tree check_elaborated_type_specifier (enum tag_types, tree, bool); extern void 
warn_extern_redeclared_static (tree, tree); extern tree cxx_comdat_group (tree); extern bool cp_missing_noreturn_ok_p (tree); extern void initialize_artificial_var (tree, VEC(constructor_elt,gc) *); extern tree check_var_type (tree, tree); extern tree reshape_init (tree, tree, tsubst_flags_t); extern tree next_initializable_field (tree); extern bool defer_mark_used_calls; extern GTY(()) VEC(tree, gc) *deferred_mark_used_calls; extern tree finish_case_label (location_t, tree, tree); extern tree cxx_maybe_build_cleanup (tree, tsubst_flags_t); /* in decl2.c */ extern bool check_java_method (tree); extern tree build_memfn_type (tree, tree, cp_cv_quals); extern tree change_return_type (tree, tree); extern void maybe_retrofit_in_chrg (tree); extern void maybe_make_one_only (tree); extern bool vague_linkage_p (tree); extern void grokclassfn (tree, tree, enum overload_flags); extern tree grok_array_decl (tree, tree); extern tree delete_sanity (tree, tree, bool, int, tsubst_flags_t); extern tree check_classfn (tree, tree, tree); extern void check_member_template (tree); extern tree grokfield (const cp_declarator *, cp_decl_specifier_seq *, tree, bool, tree, tree); extern tree grokbitfield (const cp_declarator *, cp_decl_specifier_seq *, tree, tree); extern tree cp_reconstruct_complex_type (tree, tree); extern void cplus_decl_attributes (tree *, tree, int); extern void finish_anon_union (tree); extern void cp_write_global_declarations (void); extern tree coerce_new_type (tree); extern tree coerce_delete_type (tree); extern void comdat_linkage (tree); extern void determine_visibility (tree); extern void constrain_class_visibility (tree); extern void import_export_decl (tree); extern tree build_cleanup (tree); extern tree build_offset_ref_call_from_tree (tree, VEC(tree,gc) **); extern bool decl_constant_var_p (tree); extern bool decl_maybe_constant_var_p (tree); extern void check_default_args (tree); extern bool mark_used (tree); extern void finish_static_data_member_decl (tree, tree, bool, tree, int); extern tree cp_build_parm_decl (tree, tree); extern tree get_guard (tree); extern tree get_guard_cond (tree); extern tree set_guard (tree); extern tree cxx_callgraph_analyze_expr (tree *, int *); extern void mark_needed (tree); extern bool decl_needed_p (tree); extern void note_vague_linkage_fn (tree); extern tree build_artificial_parm (tree, tree); extern bool possibly_inlined_p (tree); extern int parm_index (tree); /* in error.c */ extern void init_error (void); extern const char *type_as_string (tree, int); extern const char *type_as_string_translate (tree, int); extern const char *decl_as_string (tree, int); extern const char *decl_as_string_translate (tree, int); extern const char *expr_as_string (tree, int); extern const char *lang_decl_name (tree, int, bool); extern const char *language_to_string (enum languages); extern const char *class_key_or_enum_as_string (tree); extern void print_instantiation_context (void); extern void maybe_warn_variadic_templates (void); extern void maybe_warn_cpp0x (cpp0x_warn_str str); extern bool pedwarn_cxx98 (location_t, int, const char *, ...) 
ATTRIBUTE_GCC_DIAG(3,4); extern location_t location_of (tree); extern void qualified_name_lookup_error (tree, tree, tree, location_t); /* in except.c */ extern void init_exception_processing (void); extern tree expand_start_catch_block (tree); extern void expand_end_catch_block (void); extern tree build_exc_ptr (void); extern tree build_throw (tree); extern int nothrow_libfn_p (const_tree); extern void check_handlers (tree); extern tree finish_noexcept_expr (tree, tsubst_flags_t); extern bool expr_noexcept_p (tree, tsubst_flags_t); extern void perform_deferred_noexcept_checks (void); extern bool nothrow_spec_p (const_tree); extern bool type_noexcept_p (const_tree); extern bool type_throw_all_p (const_tree); extern tree build_noexcept_spec (tree, int); extern void choose_personality_routine (enum languages); extern tree build_must_not_throw_expr (tree,tree); extern tree eh_type_info (tree); extern tree begin_eh_spec_block (void); extern void finish_eh_spec_block (tree, tree); extern tree build_eh_type_type (tree); extern tree cp_protect_cleanup_actions (void); /* in expr.c */ extern tree cplus_expand_constant (tree); extern tree mark_rvalue_use (tree); extern tree mark_lvalue_use (tree); extern tree mark_type_use (tree); extern void mark_exp_read (tree); /* friend.c */ extern int is_friend (tree, tree); extern void make_friend_class (tree, tree, bool); extern void add_friend (tree, tree, bool); extern tree do_friend (tree, tree, tree, tree, enum overload_flags, bool); /* in init.c */ extern tree expand_member_init (tree); extern void emit_mem_initializers (tree); extern tree build_aggr_init (tree, tree, int, tsubst_flags_t); extern int is_class_type (tree, int); extern tree get_type_value (tree); extern tree build_zero_init (tree, tree, bool); extern tree build_value_init (tree, tsubst_flags_t); extern tree build_value_init_noctor (tree, tsubst_flags_t); extern tree build_offset_ref (tree, tree, bool); extern tree build_new (VEC(tree,gc) **, tree, tree, VEC(tree,gc) **, int, tsubst_flags_t); extern tree get_temp_regvar (tree, tree); extern tree build_vec_init (tree, tree, tree, bool, int, tsubst_flags_t); extern tree build_delete (tree, tree, special_function_kind, int, int, tsubst_flags_t); extern void push_base_cleanups (void); extern tree build_vec_delete (tree, tree, special_function_kind, int, tsubst_flags_t); extern tree create_temporary_var (tree); extern void initialize_vtbl_ptrs (tree); extern tree build_java_class_ref (tree); extern tree integral_constant_value (tree); extern tree decl_constant_value_safe (tree); extern int diagnose_uninitialized_cst_or_ref_member (tree, bool, bool); /* in lex.c */ extern void cxx_dup_lang_specific_decl (tree); extern void yyungetc (int, int); extern tree unqualified_name_lookup_error (tree); extern tree unqualified_fn_lookup_error (tree); extern tree build_lang_decl (enum tree_code, tree, tree); extern tree build_lang_decl_loc (location_t, enum tree_code, tree, tree); extern void retrofit_lang_decl (tree); extern tree copy_decl (tree); extern tree copy_type (tree); extern tree cxx_make_type (enum tree_code); extern tree make_class_type (enum tree_code); extern bool cxx_init (void); extern void cxx_finish (void); extern bool in_main_input_context (void); /* in method.c */ extern void init_method (void); extern tree make_thunk (tree, bool, tree, tree); extern void finish_thunk (tree); extern void use_thunk (tree, bool); extern bool trivial_fn_p (tree); extern bool maybe_explain_implicit_delete (tree); extern void explain_implicit_non_constexpr 
(tree); extern void synthesize_method (tree); extern tree lazily_declare_fn (special_function_kind, tree); extern tree skip_artificial_parms_for (const_tree, tree); extern int num_artificial_parms_for (const_tree); extern tree make_alias_for (tree, tree); extern tree get_copy_ctor (tree, tsubst_flags_t); extern tree get_copy_assign (tree); extern tree get_default_ctor (tree); extern tree get_dtor (tree, tsubst_flags_t); extern tree locate_ctor (tree); /* In optimize.c */ extern bool maybe_clone_body (tree); /* in pt.c */ extern bool check_template_shadow (tree); extern tree get_innermost_template_args (tree, int); extern void maybe_begin_member_template_processing (tree); extern void maybe_end_member_template_processing (void); extern tree finish_member_template_decl (tree); extern void begin_template_parm_list (void); extern bool begin_specialization (void); extern void reset_specialization (void); extern void end_specialization (void); extern void begin_explicit_instantiation (void); extern void end_explicit_instantiation (void); extern tree check_explicit_specialization (tree, tree, int, int); extern tree make_auto (void); extern tree do_auto_deduction (tree, tree, tree); extern tree type_uses_auto (tree); extern void append_type_to_template_for_access_check (tree, tree, tree, location_t); extern tree splice_late_return_type (tree, tree); extern bool is_auto (const_tree); extern tree process_template_parm (tree, location_t, tree, bool, bool, unsigned); extern tree end_template_parm_list (tree); void fixup_template_parms (void); extern void end_template_decl (void); extern tree maybe_update_decl_type (tree, tree); extern bool check_default_tmpl_args (tree, tree, int, int, int); extern tree push_template_decl (tree); extern tree push_template_decl_real (tree, bool); extern bool redeclare_class_template (tree, tree); extern tree lookup_template_class (tree, tree, tree, tree, int, tsubst_flags_t); extern tree lookup_template_function (tree, tree); extern int uses_template_parms (tree); extern int uses_template_parms_level (tree, int); extern tree instantiate_class_template (tree); extern tree instantiate_template (tree, tree, tsubst_flags_t); extern int fn_type_unification (tree, tree, tree, const tree *, unsigned int, tree, unification_kind_t, int, bool); extern void mark_decl_instantiated (tree, int); extern int more_specialized_fn (tree, tree, int); extern void do_decl_instantiation (tree, tree); extern void do_type_instantiation (tree, tree, tsubst_flags_t); extern bool always_instantiate_p (tree); extern void maybe_instantiate_noexcept (tree); extern tree instantiate_decl (tree, int, bool); extern int comp_template_parms (const_tree, const_tree); extern bool uses_parameter_packs (tree); extern bool template_parameter_pack_p (const_tree); extern bool function_parameter_pack_p (const_tree); extern bool function_parameter_expanded_from_pack_p (tree, tree); extern tree make_pack_expansion (tree); extern bool check_for_bare_parameter_packs (tree); extern tree build_template_info (tree, tree); extern tree get_template_info (const_tree); extern VEC(qualified_typedef_usage_t,gc)* get_types_needing_access_check (tree); extern int template_class_depth (tree); extern int is_specialization_of (tree, tree); extern bool is_specialization_of_friend (tree, tree); extern tree get_pattern_parm (tree, tree); extern int comp_template_args (tree, tree); extern tree maybe_process_partial_specialization (tree); extern tree most_specialized_instantiation (tree); extern void print_candidates (tree); extern 
void instantiate_pending_templates (int); extern tree tsubst_default_argument (tree, tree, tree); extern tree tsubst (tree, tree, tsubst_flags_t, tree); extern tree tsubst_copy_and_build (tree, tree, tsubst_flags_t, tree, bool, bool); extern tree most_general_template (tree); extern tree get_mostly_instantiated_function_type (tree); extern int problematic_instantiation_changed (void); extern void record_last_problematic_instantiation (void); extern struct tinst_level *current_instantiation(void); extern tree maybe_get_template_decl_from_type_decl (tree); extern int processing_template_parmlist; extern bool dependent_type_p (tree); extern bool dependent_scope_p (tree); extern bool any_dependent_template_arguments_p (const_tree); extern bool dependent_template_p (tree); extern bool dependent_template_id_p (tree, tree); extern bool type_dependent_expression_p (tree); extern bool any_type_dependent_arguments_p (const VEC(tree,gc) *); extern bool any_type_dependent_elements_p (const_tree); extern bool type_dependent_expression_p_push (tree); extern bool value_dependent_expression_p (tree); extern bool any_value_dependent_elements_p (const_tree); extern bool dependent_omp_for_p (tree, tree, tree, tree); extern tree resolve_typename_type (tree, bool); extern tree template_for_substitution (tree); extern tree build_non_dependent_expr (tree); extern void make_args_non_dependent (VEC(tree,gc) *); extern bool reregister_specialization (tree, tree, tree); extern tree fold_non_dependent_expr (tree); extern bool alias_type_or_template_p (tree); extern bool alias_template_specialization_p (tree); extern bool explicit_class_specialization_p (tree); extern int push_tinst_level (tree); extern void pop_tinst_level (void); extern struct tinst_level *outermost_tinst_level(void); extern bool parameter_of_template_p (tree, tree); extern void init_template_processing (void); extern void print_template_statistics (void); bool template_template_parameter_p (const_tree); extern bool primary_template_instantiation_p (const_tree); extern tree get_primary_template_innermost_parameters (const_tree); extern tree get_template_parms_at_level (tree, int); extern tree get_template_innermost_arguments (const_tree); extern tree get_template_argument_pack_elems (const_tree); extern tree get_function_template_decl (const_tree); extern tree resolve_nondeduced_context (tree); extern hashval_t iterative_hash_template_arg (tree arg, hashval_t val); /* in repo.c */ extern void init_repo (void); extern int repo_emit_p (tree); extern bool repo_export_class_p (const_tree); extern void finish_repo (void); /* in rtti.c */ /* A vector of all tinfo decls that haven't been emitted yet. 
*/ extern GTY(()) VEC(tree,gc) *unemitted_tinfo_decls; extern void init_rtti_processing (void); extern tree build_typeid (tree); extern tree get_tinfo_decl (tree); extern tree get_typeid (tree); extern tree build_headof (tree); extern tree build_dynamic_cast (tree, tree, tsubst_flags_t); extern void emit_support_tinfos (void); extern bool emit_tinfo_decl (tree); /* in search.c */ extern bool accessible_base_p (tree, tree, bool); extern tree lookup_base (tree, tree, base_access, base_kind *); extern tree dcast_base_hint (tree, tree); extern int accessible_p (tree, tree, bool); extern tree lookup_field_1 (tree, tree, bool); extern tree lookup_field (tree, tree, int, bool); extern int lookup_fnfields_1 (tree, tree); extern tree lookup_fnfields_slot (tree, tree); extern tree lookup_fnfields_slot_nolazy (tree, tree); extern int class_method_index_for_fn (tree, tree); extern tree lookup_fnfields (tree, tree, int); extern tree lookup_member (tree, tree, int, bool, tsubst_flags_t); extern int look_for_overrides (tree, tree); extern void get_pure_virtuals (tree); extern void maybe_suppress_debug_info (tree); extern void note_debug_info_needed (tree); extern void print_search_statistics (void); extern void reinit_search_statistics (void); extern tree current_scope (void); extern int at_function_scope_p (void); extern bool at_class_scope_p (void); extern bool at_namespace_scope_p (void); extern tree context_for_name_lookup (tree); extern tree lookup_conversions (tree); extern tree binfo_from_vbase (tree); extern tree binfo_for_vbase (tree, tree); extern tree look_for_overrides_here (tree, tree); #define dfs_skip_bases ((tree)1) extern tree dfs_walk_all (tree, tree (*) (tree, void *), tree (*) (tree, void *), void *); extern tree dfs_walk_once (tree, tree (*) (tree, void *), tree (*) (tree, void *), void *); extern tree binfo_via_virtual (tree, tree); extern tree build_baselink (tree, tree, tree, tree); extern tree adjust_result_of_qualified_name_lookup (tree, tree, tree); extern tree copied_binfo (tree, tree); extern tree original_binfo (tree, tree); extern int shared_member_p (tree); /* The representation of a deferred access check. */ typedef struct GTY(()) deferred_access_check { /* The base class in which the declaration is referenced. */ tree binfo; /* The declaration whose access must be checked. */ tree decl; /* The declaration that should be used in the error message. 
*/ tree diag_decl; } deferred_access_check; DEF_VEC_O(deferred_access_check); DEF_VEC_ALLOC_O(deferred_access_check,gc); /* in semantics.c */ extern void push_deferring_access_checks (deferring_kind); extern void resume_deferring_access_checks (void); extern void stop_deferring_access_checks (void); extern void pop_deferring_access_checks (void); extern VEC (deferred_access_check,gc)* get_deferred_access_checks (void); extern void pop_to_parent_deferring_access_checks (void); extern void perform_access_checks (VEC (deferred_access_check,gc)*); extern void perform_deferred_access_checks (void); extern void perform_or_defer_access_check (tree, tree, tree); extern bool speculative_access_check (tree, tree, tree, bool); extern int stmts_are_full_exprs_p (void); extern void init_cp_semantics (void); extern tree do_poplevel (tree); extern void add_decl_expr (tree); extern tree maybe_cleanup_point_expr_void (tree); extern tree finish_expr_stmt (tree); extern tree begin_if_stmt (void); extern void finish_if_stmt_cond (tree, tree); extern tree finish_then_clause (tree); extern void begin_else_clause (tree); extern void finish_else_clause (tree); extern void finish_if_stmt (tree); extern tree begin_while_stmt (void); extern void finish_while_stmt_cond (tree, tree); extern void finish_while_stmt (tree); extern tree begin_do_stmt (void); extern void finish_do_body (tree); extern void finish_do_stmt (tree, tree); extern tree finish_return_stmt (tree); extern tree begin_for_scope (tree *); extern tree begin_for_stmt (tree, tree); extern void finish_for_init_stmt (tree); extern void finish_for_cond (tree, tree); extern void finish_for_expr (tree, tree); extern void finish_for_stmt (tree); extern tree begin_range_for_stmt (tree, tree); extern void finish_range_for_decl (tree, tree, tree); extern void finish_range_for_stmt (tree); extern tree finish_break_stmt (void); extern tree finish_continue_stmt (void); extern tree begin_switch_stmt (void); extern void finish_switch_cond (tree, tree); extern void finish_switch_stmt (tree); extern tree finish_goto_stmt (tree); extern tree begin_try_block (void); extern void finish_try_block (tree); extern void finish_handler_sequence (tree); extern tree begin_function_try_block (tree *); extern void finish_function_try_block (tree); extern void finish_function_handler_sequence (tree, tree); extern void finish_cleanup_try_block (tree); extern tree begin_handler (void); extern void finish_handler_parms (tree, tree); extern void finish_handler (tree); extern void finish_cleanup (tree, tree); extern bool literal_type_p (tree); extern tree register_constexpr_fundef (tree, tree); extern bool check_constexpr_ctor_body (tree, tree); extern tree ensure_literal_type_for_constexpr_object (tree); extern bool potential_constant_expression (tree); extern bool potential_rvalue_constant_expression (tree); extern bool require_potential_constant_expression (tree); extern bool require_potential_rvalue_constant_expression (tree); extern tree cxx_constant_value (tree); extern tree maybe_constant_value (tree); extern tree maybe_constant_init (tree); extern bool is_sub_constant_expr (tree); extern bool reduced_constant_expression_p (tree); extern void explain_invalid_constexpr_fn (tree); extern VEC(tree,heap)* cx_error_context (void); enum { BCS_NO_SCOPE = 1, BCS_TRY_BLOCK = 2, BCS_FN_BODY = 4 }; extern tree begin_compound_stmt (unsigned int); extern void finish_compound_stmt (tree); extern tree finish_asm_stmt (int, tree, tree, tree, tree, tree); extern tree finish_label_stmt (tree); extern 
void finish_label_decl (tree); extern tree finish_parenthesized_expr (tree); extern tree finish_non_static_data_member (tree, tree, tree); extern tree begin_stmt_expr (void); extern tree finish_stmt_expr_expr (tree, tree); extern tree finish_stmt_expr (tree, bool); extern tree stmt_expr_value_expr (tree); bool empty_expr_stmt_p (tree); extern tree perform_koenig_lookup (tree, VEC(tree,gc) *, bool, tsubst_flags_t); extern tree finish_call_expr (tree, VEC(tree,gc) **, bool, bool, tsubst_flags_t); extern tree finish_increment_expr (tree, enum tree_code); extern tree finish_this_expr (void); extern tree finish_pseudo_destructor_expr (tree, tree, tree); extern tree finish_unary_op_expr (enum tree_code, tree); extern tree finish_compound_literal (tree, tree, tsubst_flags_t); extern tree finish_fname (tree); extern void finish_translation_unit (void); extern tree finish_template_type_parm (tree, tree); extern tree finish_template_template_parm (tree, tree); extern tree begin_class_definition (tree); extern void finish_template_decl (tree); extern tree finish_template_type (tree, tree, int); extern tree finish_base_specifier (tree, tree, bool); extern void finish_member_declaration (tree); extern tree finish_id_expression (tree, tree, tree, cp_id_kind *, bool, bool, bool *, bool, bool, bool, bool, const char **, location_t); extern tree finish_typeof (tree); extern tree finish_underlying_type (tree); extern tree calculate_bases (tree); extern tree finish_bases (tree, bool); extern tree calculate_direct_bases (tree); extern tree finish_offsetof (tree); extern void finish_decl_cleanup (tree, tree); extern void finish_eh_cleanup (tree); extern void emit_associated_thunks (tree); extern void finish_mem_initializers (tree); extern tree check_template_template_default_arg (tree); extern bool expand_or_defer_fn_1 (tree); extern void expand_or_defer_fn (tree); extern void add_typedef_to_current_template_for_access_check (tree, tree, location_t); extern void check_accessibility_of_qualified_id (tree, tree, tree); extern tree finish_qualified_id_expr (tree, tree, bool, bool, bool, bool); extern void simplify_aggr_init_expr (tree *); extern void finalize_nrv (tree *, tree, tree); extern void note_decl_for_pch (tree); extern tree finish_omp_clauses (tree); extern void finish_omp_threadprivate (tree); extern tree begin_omp_structured_block (void); extern tree finish_omp_structured_block (tree); extern tree begin_omp_parallel (void); extern tree finish_omp_parallel (tree, tree); extern tree begin_omp_task (void); extern tree finish_omp_task (tree, tree); extern tree finish_omp_for (location_t, tree, tree, tree, tree, tree, tree, tree); extern void finish_omp_atomic (enum tree_code, enum tree_code, tree, tree, tree, tree, tree); extern void finish_omp_barrier (void); extern void finish_omp_flush (void); extern void finish_omp_taskwait (void); extern tree begin_transaction_stmt (location_t, tree *, int); extern void finish_transaction_stmt (tree, tree, int, tree); extern tree build_transaction_expr (location_t, tree, int, tree); extern void finish_omp_taskyield (void); extern bool cxx_omp_create_clause_info (tree, tree, bool, bool, bool); extern tree baselink_for_fns (tree); extern void finish_static_assert (tree, tree, location_t, bool); extern tree finish_decltype_type (tree, bool, tsubst_flags_t); extern tree finish_trait_expr (enum cp_trait_kind, tree, tree); extern tree build_lambda_expr (void); extern tree build_lambda_object (tree); extern tree begin_lambda_type (tree); extern tree 
lambda_capture_field_type (tree); extern tree lambda_return_type (tree); extern tree lambda_proxy_type (tree); extern tree lambda_function (tree); extern void apply_lambda_return_type (tree, tree); extern tree add_capture (tree, tree, tree, bool, bool); extern tree add_default_capture (tree, tree, tree); extern tree build_capture_proxy (tree); extern void insert_capture_proxy (tree); extern void insert_pending_capture_proxies (void); extern bool is_capture_proxy (tree); extern bool is_normal_capture_proxy (tree); extern void register_capture_members (tree); extern tree lambda_expr_this_capture (tree); extern tree nonlambda_method_basetype (void); extern void maybe_add_lambda_conv_op (tree); extern bool is_lambda_ignored_entity (tree); /* in tree.c */ extern int cp_tree_operand_length (const_tree); void cp_free_lang_data (tree t); extern tree force_target_expr (tree, tree, tsubst_flags_t); extern tree build_target_expr_with_type (tree, tree, tsubst_flags_t); extern void lang_check_failed (const char *, int, const char *) ATTRIBUTE_NORETURN; extern tree stabilize_expr (tree, tree *); extern void stabilize_call (tree, tree *); extern void stabilize_aggr_init (tree, tree *); extern bool stabilize_init (tree, tree *); extern tree add_stmt_to_compound (tree, tree); extern void init_tree (void); extern bool pod_type_p (const_tree); extern bool layout_pod_type_p (const_tree); extern bool std_layout_type_p (const_tree); extern bool trivial_type_p (const_tree); extern bool trivially_copyable_p (const_tree); extern bool type_has_nontrivial_default_init (const_tree); extern bool type_has_nontrivial_copy_init (const_tree); extern bool class_tmpl_impl_spec_p (const_tree); extern int zero_init_p (const_tree); extern tree strip_typedefs (tree); extern tree copy_binfo (tree, tree, tree, tree *, int); extern int member_p (const_tree); extern cp_lvalue_kind real_lvalue_p (const_tree); extern cp_lvalue_kind lvalue_kind (const_tree); extern bool lvalue_or_rvalue_with_address_p (const_tree); extern bool builtin_valid_in_constant_expr_p (const_tree); extern tree build_min (enum tree_code, tree, ...); extern tree build_min_nt (enum tree_code, ...); extern tree build_min_non_dep (enum tree_code, tree, ...); extern tree build_min_non_dep_call_vec (tree, tree, VEC(tree,gc) *); extern tree build_cplus_new (tree, tree, tsubst_flags_t); extern tree build_aggr_init_expr (tree, tree, tsubst_flags_t); extern tree get_target_expr (tree); extern tree get_target_expr_sfinae (tree, tsubst_flags_t); extern tree build_cplus_array_type (tree, tree); extern tree build_array_of_n_type (tree, int); extern tree build_array_copy (tree); extern tree build_vec_init_expr (tree, tree, tsubst_flags_t); extern void diagnose_non_constexpr_vec_init (tree); extern tree hash_tree_cons (tree, tree, tree); extern tree hash_tree_chain (tree, tree); extern tree build_qualified_name (tree, tree, tree, bool); extern int is_overloaded_fn (tree); extern tree dependent_name (tree); extern tree get_fns (tree); extern tree get_first_fn (tree); extern tree ovl_cons (tree, tree); extern tree build_overload (tree, tree); extern tree ovl_scope (tree); extern bool non_static_member_function_p (tree); extern const char *cxx_printable_name (tree, int); extern const char *cxx_printable_name_translate (tree, int); extern tree build_exception_variant (tree, tree); extern tree bind_template_template_parm (tree, tree); extern tree array_type_nelts_total (tree); extern tree array_type_nelts_top (tree); extern tree break_out_target_exprs (tree); extern tree 
get_type_decl (tree); extern tree decl_namespace_context (tree); extern bool decl_anon_ns_mem_p (const_tree); extern tree lvalue_type (tree); extern tree error_type (tree); extern int varargs_function_p (const_tree); extern bool really_overloaded_fn (tree); extern bool cp_tree_equal (tree, tree); extern tree no_linkage_check (tree, bool); extern void debug_binfo (tree); extern tree build_dummy_object (tree); extern tree maybe_dummy_object (tree, tree *); extern int is_dummy_object (const_tree); extern const struct attribute_spec cxx_attribute_table[]; extern tree make_ptrmem_cst (tree, tree); extern tree cp_build_type_attribute_variant (tree, tree); extern tree cp_build_reference_type (tree, bool); extern tree move (tree); extern tree cp_build_qualified_type_real (tree, int, tsubst_flags_t); #define cp_build_qualified_type(TYPE, QUALS) \ cp_build_qualified_type_real ((TYPE), (QUALS), tf_warning_or_error) extern bool cv_qualified_p (const_tree); extern tree cv_unqualified (tree); extern special_function_kind special_function_p (const_tree); extern int count_trees (tree); extern int char_type_p (tree); extern void verify_stmt_tree (tree); extern linkage_kind decl_linkage (tree); extern duration_kind decl_storage_duration (tree); extern tree cp_walk_subtrees (tree*, int*, walk_tree_fn, void*, struct pointer_set_t*); #define cp_walk_tree(a,b,c,d) \ walk_tree_1 (a, b, c, d, cp_walk_subtrees) #define cp_walk_tree_without_duplicates(a,b,c) \ walk_tree_without_duplicates_1 (a, b, c, cp_walk_subtrees) extern tree fold_if_not_in_template (tree); extern tree rvalue (tree); extern tree convert_bitfield_to_declared_type (tree); extern tree cp_save_expr (tree); extern bool cast_valid_in_integral_constant_expression_p (tree); extern bool cxx_type_hash_eq (const_tree, const_tree); extern void cxx_print_statistics (void); /* in ptree.c */ extern void cxx_print_xnode (FILE *, tree, int); extern void cxx_print_decl (FILE *, tree, int); extern void cxx_print_type (FILE *, tree, int); extern void cxx_print_identifier (FILE *, tree, int); extern void cxx_print_error_function (diagnostic_context *, const char *, struct diagnostic_info *); /* in typeck.c */ extern bool cxx_mark_addressable (tree); extern int string_conv_p (const_tree, const_tree, int); extern tree cp_truthvalue_conversion (tree); extern tree condition_conversion (tree); extern tree require_complete_type (tree); extern tree require_complete_type_sfinae (tree, tsubst_flags_t); extern tree complete_type (tree); extern tree complete_type_or_else (tree, tree); extern tree complete_type_or_maybe_complain (tree, tree, tsubst_flags_t); extern int type_unknown_p (const_tree); enum { ce_derived, ce_normal, ce_exact }; extern bool comp_except_specs (const_tree, const_tree, int); extern bool comptypes (tree, tree, int); extern bool same_type_ignoring_top_level_qualifiers_p (tree, tree); extern bool compparms (const_tree, const_tree); extern int comp_cv_qualification (const_tree, const_tree); extern int comp_cv_qual_signature (tree, tree); extern tree cxx_sizeof_or_alignof_expr (tree, enum tree_code, bool); extern tree cxx_sizeof_or_alignof_type (tree, enum tree_code, bool); extern tree cxx_sizeof_nowarn (tree); extern tree is_bitfield_expr_with_lowered_type (const_tree); extern tree unlowered_expr_type (const_tree); extern tree decay_conversion (tree); extern tree build_class_member_access_expr (tree, tree, tree, bool, tsubst_flags_t); extern tree finish_class_member_access_expr (tree, tree, bool, tsubst_flags_t); extern tree build_x_indirect_ref (tree, 
ref_operator, tsubst_flags_t); extern tree cp_build_indirect_ref (tree, ref_operator, tsubst_flags_t); extern tree build_array_ref (location_t, tree, tree); extern tree cp_build_array_ref (location_t, tree, tree, tsubst_flags_t); extern tree get_member_function_from_ptrfunc (tree *, tree); extern tree cp_build_function_call (tree, tree, tsubst_flags_t); extern tree cp_build_function_call_nary (tree, tsubst_flags_t, ...) ATTRIBUTE_SENTINEL; extern tree cp_build_function_call_vec (tree, VEC(tree,gc) **, tsubst_flags_t); extern tree build_x_binary_op (enum tree_code, tree, enum tree_code, tree, enum tree_code, tree *, tsubst_flags_t); extern tree build_x_array_ref (tree, tree, tsubst_flags_t); extern tree build_x_unary_op (enum tree_code, tree, tsubst_flags_t); extern tree cp_build_addr_expr (tree, tsubst_flags_t); extern tree cp_build_addr_expr_strict (tree, tsubst_flags_t); extern tree cp_build_unary_op (enum tree_code, tree, int, tsubst_flags_t); extern tree unary_complex_lvalue (enum tree_code, tree); extern tree build_x_conditional_expr (tree, tree, tree, tsubst_flags_t); extern tree build_x_compound_expr_from_list (tree, expr_list_kind, tsubst_flags_t); extern tree build_x_compound_expr_from_vec (VEC(tree,gc) *, const char *); extern tree build_x_compound_expr (tree, tree, tsubst_flags_t); extern tree build_compound_expr (location_t, tree, tree); extern tree cp_build_compound_expr (tree, tree, tsubst_flags_t); extern tree build_static_cast (tree, tree, tsubst_flags_t); extern tree build_reinterpret_cast (tree, tree, tsubst_flags_t); extern tree build_const_cast (tree, tree, tsubst_flags_t); extern tree build_c_cast (location_t, tree, tree); extern tree cp_build_c_cast (tree, tree, tsubst_flags_t); extern tree build_x_modify_expr (tree, enum tree_code, tree, tsubst_flags_t); extern tree cp_build_modify_expr (tree, enum tree_code, tree, tsubst_flags_t); extern tree convert_for_initialization (tree, tree, tree, int, impl_conv_rhs, tree, int, tsubst_flags_t); extern int comp_ptr_ttypes (tree, tree); extern bool comp_ptr_ttypes_const (tree, tree); extern bool error_type_p (const_tree); extern int ptr_reasonably_similar (const_tree, const_tree); extern tree build_ptrmemfunc (tree, tree, int, bool, tsubst_flags_t); extern int cp_type_quals (const_tree); extern int type_memfn_quals (const_tree); extern tree apply_memfn_quals (tree, cp_cv_quals); extern bool cp_has_mutable_p (const_tree); extern bool at_least_as_qualified_p (const_tree, const_tree); extern void cp_apply_type_quals_to_decl (int, tree); extern tree build_ptrmemfunc1 (tree, tree, tree); extern void expand_ptrmemfunc_cst (tree, tree *, tree *); extern tree type_after_usual_arithmetic_conversions (tree, tree); extern tree common_pointer_type (tree, tree); extern tree composite_pointer_type (tree, tree, tree, tree, composite_pointer_operation, tsubst_flags_t); extern tree merge_types (tree, tree); extern tree strip_array_domain (tree); extern tree check_return_expr (tree, bool *); extern tree cp_build_binary_op (location_t, enum tree_code, tree, tree, tsubst_flags_t); #define cxx_sizeof(T) cxx_sizeof_or_alignof_type (T, SIZEOF_EXPR, true) extern tree build_ptrmemfunc_access_expr (tree, tree); extern tree build_address (tree); extern tree build_typed_address (tree, tree); extern tree build_nop (tree, tree); extern tree non_reference (tree); extern tree lookup_anon_field (tree, tree); extern bool invalid_nonstatic_memfn_p (const_tree, tsubst_flags_t); extern tree convert_member_func_to_ptr (tree, tree); extern tree convert_ptrmem (tree, 
tree, bool, bool, tsubst_flags_t); extern int lvalue_or_else (tree, enum lvalue_use, tsubst_flags_t); extern void check_template_keyword (tree); extern bool check_raw_literal_operator (const_tree decl); extern bool check_literal_operator_args (const_tree, bool *, bool *); /* in typeck2.c */ extern void require_complete_eh_spec_types (tree, tree); extern void cxx_incomplete_type_diagnostic (const_tree, const_tree, diagnostic_t); #undef cxx_incomplete_type_error extern void cxx_incomplete_type_error (const_tree, const_tree); #define cxx_incomplete_type_error(V,T) \ (cxx_incomplete_type_diagnostic ((V), (T), DK_ERROR)) extern tree error_not_base_type (tree, tree); extern tree binfo_or_else (tree, tree); extern void cxx_readonly_error (tree, enum lvalue_use); extern void complete_type_check_abstract (tree); extern int abstract_virtuals_error (tree, tree); extern int abstract_virtuals_error_sfinae (tree, tree, tsubst_flags_t); extern tree store_init_value (tree, tree, VEC(tree,gc)**, int); extern void check_narrowing (tree, tree); extern tree digest_init (tree, tree, tsubst_flags_t); extern tree digest_init_flags (tree, tree, int); extern tree build_scoped_ref (tree, tree, tree *); extern tree build_x_arrow (tree); extern tree build_m_component_ref (tree, tree); extern tree build_functional_cast (tree, tree, tsubst_flags_t); extern tree add_exception_specifier (tree, tree, int); extern tree merge_exception_specifiers (tree, tree, tree); /* in mangle.c */ extern void init_mangle (void); extern void mangle_decl (tree); extern const char *mangle_type_string (tree); extern tree mangle_typeinfo_for_type (tree); extern tree mangle_typeinfo_string_for_type (tree); extern tree mangle_vtbl_for_type (tree); extern tree mangle_vtt_for_type (tree); extern tree mangle_ctor_vtbl_for_type (tree, tree); extern tree mangle_thunk (tree, int, tree, tree); extern tree mangle_conv_op_name_for_type (tree); extern tree mangle_guard_variable (tree); extern tree mangle_ref_init_variable (tree); /* in dump.c */ extern bool cp_dump_tree (void *, tree); /* In cp/cp-objcp-common.c. */ extern alias_set_type cxx_get_alias_set (tree); extern bool cxx_warn_unused_global_decl (const_tree); extern size_t cp_tree_size (enum tree_code); extern bool cp_var_mod_type_p (tree, tree); extern void cxx_initialize_diagnostics (diagnostic_context *); extern int cxx_types_compatible_p (tree, tree); extern void init_shadowed_var_for_decl (void); /* in cp-gimplify.c */ extern int cp_gimplify_expr (tree *, gimple_seq *, gimple_seq *); extern void cp_genericize (tree); extern bool cxx_omp_const_qual_no_mutable (tree); extern enum omp_clause_default_kind cxx_omp_predetermined_sharing (tree); extern tree cxx_omp_clause_default_ctor (tree, tree, tree); extern tree cxx_omp_clause_copy_ctor (tree, tree, tree); extern tree cxx_omp_clause_assign_op (tree, tree, tree); extern tree cxx_omp_clause_dtor (tree, tree); extern void cxx_omp_finish_clause (tree); extern bool cxx_omp_privatize_by_reference (const_tree); /* in name-lookup.c */ extern void suggest_alternatives_for (location_t, tree); extern tree strip_using_decl (tree); /* -- end of C++ */ #endif /* ! GCC_CP_TREE_H */
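The TFF_* constants declared earlier in this header are plain bit flags, so a caller combines them with bitwise-or and passes the resulting mask as the int argument of the printers declared for error.c (type_as_string, decl_as_string, and friends). A minimal sketch, assuming a valid front-end tree node; the helper name dump_decl_with_scope is hypothetical and not part of cp-tree.h:

/* Hypothetical helper, not part of cp-tree.h: print DECL with its scope,
   decl-specifiers and return type by or-ing TFF_* bit flags together and
   handing the mask to decl_as_string (declared above, in error.c).  */
static void
dump_decl_with_scope (tree decl)
{
  int flags = TFF_SCOPE | TFF_DECL_SPECIFIERS | TFF_RETURN_TYPE;
  fprintf (stderr, "%s\n", decl_as_string (decl, flags));
}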
GrB_Monoid_wait.c
//------------------------------------------------------------------------------
// GrB_Monoid_wait: wait for a user-defined GrB_Monoid to complete
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// In SuiteSparse:GraphBLAS, a user-defined GrB_Monoid has no pending
// operations to wait for.  All this method does is verify that the monoid is
// properly initialized, and then it does an OpenMP flush.

#include "GB.h"

GrB_Info GrB_Monoid_wait    // no work, just check if the GrB_Monoid is valid
(
    #if (GxB_IMPLEMENTATION_MAJOR <= 5)
    GrB_Monoid *monoid
    #else
    GrB_Monoid monoid,
    GrB_WaitMode waitmode
    #endif
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    #if (GxB_IMPLEMENTATION_MAJOR <= 5)
    GB_WHERE1 ("GrB_Monoid_wait (&monoid)") ;
    GB_RETURN_IF_NULL (monoid) ;
    GB_RETURN_IF_NULL_OR_FAULTY (*monoid) ;
    #else
    GB_WHERE1 ("GrB_Monoid_wait (monoid, waitmode)") ;
    GB_RETURN_IF_NULL_OR_FAULTY (monoid) ;
    #endif

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    #pragma omp flush
    return (GrB_SUCCESS) ;
}
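Since GrB_Monoid_wait does no real work here, a caller uses it only to validate a monoid it has built. A minimal sketch against the v5-and-earlier signature taken by the first #if branch above; GrB_Monoid_new_FP64, GrB_PLUS_FP64 and GrB_Monoid_free are standard GraphBLAS C API calls, and the helper name wait_on_plus_monoid is made up for the example:

// Sketch only: build a PLUS monoid over double and call the wait method
// above (v5-and-earlier form, which takes a GrB_Monoid *).
#include "GraphBLAS.h"

static GrB_Info wait_on_plus_monoid (void)
{
    GrB_Monoid plus = NULL ;
    GrB_Info info = GrB_Monoid_new_FP64 (&plus, GrB_PLUS_FP64, (double) 0) ;
    if (info != GrB_SUCCESS) return (info) ;
    info = GrB_Monoid_wait (&plus) ;    // no pending work; just validates the monoid
    GrB_Monoid_free (&plus) ;
    return (info) ;
}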
omp_reduction.c
/******************************************************************************
 * FILE: omp_reduction.c
 * DESCRIPTION:
 *   OpenMP Example - Combined Parallel Loop Reduction - C/C++ Version
 *   This example demonstrates a sum reduction within a combined parallel loop
 *   construct.  Notice that default data element scoping is assumed - there
 *   are no clauses specifying shared or private variables.  OpenMP will
 *   automatically make loop index variables private within team threads, and
 *   global variables shared.
 * AUTHOR: Blaise Barney  5/99
 * LAST REVISED: 04/06/05
 ******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int main (int argc, char *argv[])
{
  int   i, n;
  float a[100], b[100], sum;

  /* Some initializations */
  n = 100;
  for (i = 0; i < n; i++)
    a[i] = b[i] = i * 1.0;
  sum = 0.0;

  #pragma omp parallel for reduction(+:sum)
  for (i = 0; i < n; i++)
    sum = sum + (a[i] * b[i]);

  printf("   Sum = %f\n", sum);
  return 0;   /* main is declared int, so return a status explicitly */
}
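The header comment above leans on OpenMP's default scoping rules. For readers who prefer the scoping spelled out, the loop in main can be written equivalently with explicit clauses; this is a drop-in variant of the same loop, not an addition to the original example:

/* Equivalent form of the loop above with the scoping made explicit:
   i is private to each thread, a, b and n are shared, and sum is the
   reduction variable. */
#pragma omp parallel for default(none) private(i) shared(a, b, n) reduction(+:sum)
for (i = 0; i < n; i++)
  sum = sum + (a[i] * b[i]);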
3d7pt_var.c
/*
 * Order-1, 3D 7 point stencil with variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"

#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive.
   */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 16;
  tile_size[3] = 256;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;

  // initialize variables
  //
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
  #pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
    #pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] =
                coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);   /* use the MIN macro defined above; a lowercase min() is never declared */
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
  #pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);

  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }

  return 0;
}
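The timing idiom in main above (a gettimeofday pair around the sweep, timeval_subtract to form the difference, and the minimum kept across TESTS repetitions) is reusable on its own. A minimal sketch, where work() is a hypothetical stand-in for one stencil sweep:

/* Sketch of the best-of-N timing pattern used above; work() is hypothetical. */
#include <sys/time.h>

extern int timeval_subtract(struct timeval *, struct timeval *, struct timeval *);
extern void work(void);   /* hypothetical stand-in for one stencil sweep */

static double best_time_of(int trials)
{
  struct timeval start, end, result;
  double tdiff, best = 1.e100;
  int t;
  for (t = 0; t < trials; t++) {
    gettimeofday(&start, 0);
    work();
    gettimeofday(&end, 0);
    (void) timeval_subtract(&result, &end, &start);
    tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6);
    best = (tdiff < best) ? tdiff : best;   /* same effect as the MIN macro */
  }
  return best;
}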
yuv_to_rgb2.c
/* * YUV to RGB convert * * Copyright (C) 2019 Hiroshi Kuwagata <kgt9221@gamil.com> */ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #define ROTATE0 0x00000000 #define ROTATE90 0x00000001 #define ROTATE180 0x00000002 #define ROTATE270 0x00000003 #define FLIP 0x00000080 #ifdef ENABLE_NEON #if defined(__ARM_NEON) || defined(__ARM_NEON__) #include <arm_neon.h> #else /* defined(__ARM_NEON) || defined(__ARM_NEON__) */ #error "ARM NEON instruction is not supported." #endif /* defined(__ARM_NEON) || defined(__ARM_NEON__) */ #endif /* defined(ENABLE_NEON) */ #ifdef ENABLE_SSE2 #if defined(__SSE2__) #if defined(_MSC_VER) #include <intrin.h> #elif defined(__GNUC__) #include <x86intrin.h> #endif /* defined(*) */ #else /* defined(__SSE2__) */ #error "SSE2 instruction is not supported." #endif /* defined(__SSE2__) */ #endif /* defined(ENABLE_SSE2) */ #define SATURATE8(x) (uint8_t)(((x) < 0)? 0:(((x) > 255)? 255:(x))) struct dest_info { uint8_t* b0; uint8_t* g0; uint8_t* r0; uint8_t* b1; uint8_t* g1; uint8_t* r1; uint8_t* b2; uint8_t* g2; uint8_t* r2; uint8_t* b3; uint8_t* g3; uint8_t* r3; }; static inline void set0(uint8_t*base, int wd, int ht, int y, struct dest_info* di) { uint8_t* p; p = base + (y * (wd * 3)); di->r0 = p + 0; di->g0 = p + 1; di->b0 = p + 2; di->r1 = p + 3; di->g1 = p + 4; di->b1 = p + 5; p += (wd *3); di->r2 = p + 0; di->g2 = p + 1; di->b2 = p + 2; di->r3 = p + 3; di->g3 = p + 4; di->b3 = p + 5; } static inline void set0f(uint8_t*base, int wd, int ht, int y, struct dest_info* di) { uint8_t* p; p = base + ((y + 1) * (wd * 3)); di->r0 = p - 6; di->g0 = p - 5; di->b0 = p - 4; di->r1 = p - 3; di->g1 = p - 2; di->b1 = p - 1; p += (wd * 3); di->r2 = p - 6; di->g2 = p - 5; di->b2 = p - 4; di->r3 = p - 3; di->g3 = p - 2; di->b3 = p - 1; } static inline void inc0(int wd, int ht, struct dest_info* di) { di->b0 += 6; di->g0 += 6; di->r0 += 6; di->b1 += 6; di->g1 += 6; di->r1 += 6; di->b2 += 6; di->g2 += 6; di->r2 += 6; di->b3 += 6; di->g3 += 6; di->r3 += 6; } static inline void set90(uint8_t*base, int wd, int ht, int y, struct dest_info* di) { uint8_t* p; p = base + ((ht - y) * 3); di->r0 = p - 3; di->g0 = p - 2; di->b0 = p - 1; di->r2 = p - 6; di->g2 = p - 5; di->b2 = p - 4; p += (ht * 3); di->r1 = p - 3; di->g1 = p - 2; di->b1 = p - 1; di->r3 = p - 6; di->g3 = p - 5; di->b3 = p - 4; } static inline void set90f(uint8_t*base, int wd, int ht, int y, struct dest_info* di) { uint8_t* p; p = base + (y * 3); di->r0 = p + 0; di->g0 = p + 1; di->b0 = p + 2; di->r2 = p + 3; di->g2 = p + 4; di->b2 = p + 5; p += (ht * 3); di->r1 = p + 0; di->g1 = p + 1; di->b1 = p + 2; di->r3 = p + 3; di->g3 = p + 4; di->b3 = p + 5; } static inline void inc90(int wd, int ht, struct dest_info* di) { int st; st = ht * 6; di->b0 += st; di->g0 += st; di->r0 += st; di->b1 += st; di->g1 += st; di->r1 += st; di->b2 += st; di->g2 += st; di->r2 += st; di->b3 += st; di->g3 += st; di->r3 += st; } static inline void set180(uint8_t*base, int wd, int ht, int y, struct dest_info* di) { uint8_t* p; p = base + ((ht - y) * (wd * 3)); di->r0 = p - 3; di->g0 = p - 2; di->b0 = p - 1; di->r2 = p - 6; di->g2 = p - 5; di->b2 = p - 4; p -= (wd * 3); di->r1 = p - 3; di->g1 = p - 2; di->b1 = p - 1; di->r3 = p - 6; di->g3 = p - 5; di->b3 = p - 4; } static inline void set180f(uint8_t*base, int wd, int ht, int y, struct dest_info* di) { uint8_t* p; p = base + ((ht - (y + 1)) * (wd * 3)); di->r0 = p + 0; di->g0 = p + 1; di->b0 = p + 2; di->r2 = p + 3; di->g2 = p + 4; di->b2 = p + 5; p -= (wd * 3); di->r1 = p + 0; di->g1 = p + 1; di->b1 = 
p + 2; di->r3 = p + 3; di->g3 = p + 4; di->b3 = p + 5; } static void inc180(int wd, int ht, struct dest_info* di) { di->b0 -= 6; di->g0 -= 6; di->r0 -= 6; di->b1 -= 6; di->g1 -= 6; di->r1 -= 6; di->b2 -= 6; di->g2 -= 6; di->r2 -= 6; di->b3 -= 6; di->g3 -= 6; di->r3 -= 6; } static void set270(uint8_t* base, int wd, int ht, int y, struct dest_info* di) { uint8_t* p; p = base + (((ht * (wd - 1)) + y) * 3); di->r0 = p + 0; di->g0 = p + 1; di->b0 = p + 2; di->r2 = p + 3; di->g2 = p + 4; di->b2 = p + 5; p -= (ht * 3); di->r1 = p + 0; di->g1 = p + 1; di->b1 = p + 2; di->r3 = p + 3; di->g3 = p + 4; di->b3 = p + 5; } static void set270f(uint8_t* base, int wd, int ht, int y, struct dest_info* di) { uint8_t* p; p = base + (((ht * wd) - y) * 3); di->r0 = p - 3; di->g0 = p - 2; di->b0 = p - 1; di->r2 = p - 6; di->g2 = p - 5; di->b2 = p - 4; p -= (ht * 3); di->r1 = p - 3; di->g1 = p - 2; di->b1 = p - 1; di->r3 = p - 6; di->g3 = p - 5; di->b3 = p - 4; } static void inc270(int wd, int ht, struct dest_info* di) { int st; st = ht * 6; di->b0 -= st; di->g0 -= st; di->r0 -= st; di->b1 -= st; di->g1 -= st; di->r1 -= st; di->b2 -= st; di->g2 -= st; di->r2 -= st; di->b3 -= st; di->g3 -= st; di->r3 -= st; } #ifdef ENABLE_NEON /* * Pixels are processed in units of 2x2. * The register lanes are assigned to the pixels as follows. * * 0 1 * 2 3 * * The YUV to RGB conversion formulas are: * * R = (1.164f * (y - 16)) + (1.596f * (v - 128)) * G = (1.164f * (y - 16)) - (0.813f * (v - 128)) - (0.391f * (u - 128)) * B = (1.164f * (y - 16)) + (2.018f * (u - 128)) * * To speed this up with integer arithmetic, it is implemented as: * * R = ((1192 * (y - 16)) + (1634 * (v - 128))) >> 10 * G = ((1192 * (y - 16)) - ( 833 * (v - 128)) - (400 * (u - 128))) >> 10 * B = ((1192 * (y - 16)) + (2066 * (u - 128))) >> 10 * */ static inline void conv(uint8_t* y1, uint8_t* y2, uint8_t* u, uint8_t* v, int32x4_t c16, int32x4_t min, int32x4_t max, struct dest_info* di) { int32x4_t tl; // as "temporary for load" int32x4_t vy; int32x4_t vr; int32x4_t vg; int32x4_t vb; /* * Y */ tl = vsetq_lane_s32(y1[0], tl, 0); tl = vsetq_lane_s32(y1[1], tl, 1); tl = vsetq_lane_s32(y2[0], tl, 2); tl = vsetq_lane_s32(y2[1], tl, 3); tl = vsubq_s32(tl, c16); vy = vmulq_n_s32(tl, 1192); /* * U */ tl = vmovq_n_s32(u[0] - 128); vg = vmlsq_n_s32(vy, tl, 400); vb = vmlaq_n_s32(vy, tl, 2066); /* * V */ tl = vmovq_n_s32(v[0] - 128); vr = vmlaq_n_s32(vy, tl, 1634); vg = vmlsq_n_s32(vg, tl, 833); /* * undo the scaling and saturate */ vr = vshrq_n_s32(vr, 10); vr = vmaxq_s32(vr, min); vr = vminq_s32(vr, max); vg = vshrq_n_s32(vg, 10); vg = vmaxq_s32(vg, min); vg = vminq_s32(vg, max); vb = vshrq_n_s32(vb, 10); vb = vmaxq_s32(vb, min); vb = vminq_s32(vb, max); /* * output RGB pixels */ *(di->r0) = vgetq_lane_s32(vr, 0); *(di->g0) = vgetq_lane_s32(vg, 0); *(di->b0) = vgetq_lane_s32(vb, 0); *(di->r1) = vgetq_lane_s32(vr, 1); *(di->g1) = vgetq_lane_s32(vg, 1); *(di->b1) = vgetq_lane_s32(vb, 1); *(di->r2) = vgetq_lane_s32(vr, 2); *(di->g2) = vgetq_lane_s32(vg, 2); *(di->b2) = vgetq_lane_s32(vb, 2); *(di->r3) = vgetq_lane_s32(vr, 3); *(di->g3) = vgetq_lane_s32(vg, 3); *(di->b3) = vgetq_lane_s32(vb, 3); } void i420_to_rgb_0(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; int32x4_t c16; int32x4_t min; int32x4_t max; c16 = vmovq_n_s32(16); min = vmovq_n_s32(0); max = vmovq_n_s32(255); #pragma omp parallel for private(j) shared(c16,min,max) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set0(_d, wd, ht, i, &di); for
(j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, c16, min, max, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointer */ inc0(wd, ht, &di); } } } void i420_to_rgb_0f(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; int32x4_t c16; int32x4_t min; int32x4_t max; c16 = vmovq_n_s32(16); min = vmovq_n_s32(0); max = vmovq_n_s32(255); #pragma omp parallel for private(j) shared(c16,min,max) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set0f(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, c16, min, max, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointer */ inc180(wd, ht, &di); } } } void i420_to_rgb_90(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; int32x4_t c16; int32x4_t min; int32x4_t max; c16 = vmovq_n_s32(16); min = vmovq_n_s32(0); max = vmovq_n_s32(255); #pragma omp parallel for private(j) shared(c16,min,max) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set90(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, c16, min, max, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointer */ inc90(wd, ht, &di); } } } void i420_to_rgb_90f(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; int32x4_t c16; int32x4_t min; int32x4_t max; struct dest_info di; c16 = vmovq_n_s32(16); min = vmovq_n_s32(0); max = vmovq_n_s32(255); #pragma omp parallel for private(j) shared(c16,min,max) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set90f(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, c16, min, max, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointer */ inc90(wd, ht, &di); } } } void i420_to_rgb_180(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; int32x4_t c16; int32x4_t min; int32x4_t max; c16 = vmovq_n_s32(16); min = vmovq_n_s32(0); max = vmovq_n_s32(255); #pragma omp parallel for private(j) shared(c16,min,max) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set180(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, c16, min, max, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointer */ inc180(wd, ht, &di); } } } void i420_to_rgb_180f(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; int32x4_t c16; int32x4_t min; int32x4_t max; c16 = vmovq_n_s32(16); min = vmovq_n_s32(0); max = vmovq_n_s32(255); #pragma omp parallel for private(j) shared(c16,min,max) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = 
_v + ((i / 2) * (wd / 2)); set180f(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, c16, min, max, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointer */ inc0(wd, ht, &di); } } } void i420_to_rgb_270(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; int32x4_t c16; int32x4_t min; int32x4_t max; c16 = vmovq_n_s32(16); min = vmovq_n_s32(0); max = vmovq_n_s32(255); #pragma omp parallel for private(j) shared(c16,min,max) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set270(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, c16, min, max, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointer */ inc270(wd, ht, &di); } } } void i420_to_rgb_270f(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; int32x4_t c16; int32x4_t min; int32x4_t max; c16 = vmovq_n_s32(16); min = vmovq_n_s32(0); max = vmovq_n_s32(255); #pragma omp parallel for private(j) shared(c16,min,max) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set270f(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, c16, min, max, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointer */ inc270(wd, ht, &di); } } } #else /* defined(__ARM_NEON) || defined(__ARM_NEON__) */ /* * Pixels are processed in units of 2x2. * The YUV to RGB conversion formulas are: * * R = (1.164f * (y - 16)) + (1.596f * (v - 128)) * G = (1.164f * (y - 16)) - (0.813f * (v - 128)) - (0.391f * (u - 128)) * B = (1.164f * (y - 16)) + (2.018f * (u - 128)) * * To speed this up with integer arithmetic, it is implemented as: * * R = ((1192 * (y - 16)) + (1634 * (v - 128))) >> 10 * G = ((1192 * (y - 16)) - ( 833 * (v - 128)) - (400 * (u - 128))) >> 10 * B = ((1192 * (y - 16)) + (2066 * (u - 128))) >> 10 */ static inline void conv(uint8_t* y1, uint8_t* y2, uint8_t* u, uint8_t* v, struct dest_info* di) { int c; int d; int e; int r0; int g0; int b0; int r; int g; int b; d = ((int)u[0]) - 128; e = ((int)v[0]) - 128; r0 = (e * 1634); g0 = (d * 400) + (e * 833); b0 = (d * 2066); /* * 0,0 */ c = (((int)y1[0]) - 16) * 1192; r = (c + r0) >> 10; g = (c - g0) >> 10; b = (c + b0) >> 10; *(di->r0) = SATURATE8(r); *(di->g0) = SATURATE8(g); *(di->b0) = SATURATE8(b); /* * 0,1 */ c = (((int)y1[1]) - 16) * 1192; r = (c + r0) >> 10; g = (c - g0) >> 10; b = (c + b0) >> 10; *(di->r1) = SATURATE8(r); *(di->g1) = SATURATE8(g); *(di->b1) = SATURATE8(b); /* * 1,0 */ c = (((int)y2[0]) - 16) * 1192; r = (c + r0) >> 10; g = (c - g0) >> 10; b = (c + b0) >> 10; *(di->r2) = SATURATE8(r); *(di->g2) = SATURATE8(g); *(di->b2) = SATURATE8(b); /* * 1,1 */ c = (((int)y2[1]) - 16) * 1192; r = (c + r0) >> 10; g = (c - g0) >> 10; b = (c + b0) >> 10; *(di->r3) = SATURATE8(r); *(di->g3) = SATURATE8(g); *(di->b3) = SATURATE8(b); } void i420_to_rgb_0(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; #pragma omp parallel for private(j) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2));
set0(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointers */ inc0(wd, ht, &di); } } } void i420_to_rgb_0f(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; #pragma omp parallel for private(j) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set0f(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointers */ inc180(wd, ht, &di); } } } void i420_to_rgb_90(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; #pragma omp parallel for private(j) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set90(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointers */ inc90(wd, ht, &di); } } } void i420_to_rgb_90f(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; #pragma omp parallel for private(j) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set90f(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointers */ inc90(wd, ht, &di); } } } void i420_to_rgb_180(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; #pragma omp parallel for private(j) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set180(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointers */ inc180(wd, ht, &di); } } } void i420_to_rgb_180f(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; #pragma omp parallel for private(j) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set180f(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointers */ inc0(wd, ht, &di); } } } void i420_to_rgb_270(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; #pragma omp parallel for private(j) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set270(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, &di); /* * increase source pointers 
*/ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointers */ inc270(wd, ht, &di); } } } void i420_to_rgb_270f(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; #pragma omp parallel for private(j) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set270f(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointers */ inc270(wd, ht, &di); } } } #endif /* defined(__ARM_NEON) || defined(__ARM_NEON__) */
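A minimal usage sketch for the converters in yuv_to_rgb2.c above, assuming the plain C (non-NEON) build: an I420 frame is a full-resolution Y plane followed by quarter-resolution U and V planes, and the output is packed 24-bit RGB. Only i420_to_rgb_0 and its signature come from the file; the convert_frame helper, its buffer layout and its error handling are illustrative, and wd/ht are assumed even to match the 2x2-pixel processing units.

/* Illustrative caller (not part of yuv_to_rgb2.c). */
#include <stdint.h>
#include <stdlib.h>

void i420_to_rgb_0(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d);

int convert_frame(uint8_t* frame, int wd, int ht, uint8_t** rgb_out)
{
  uint8_t* y = frame;                        /* Y plane: wd*ht bytes          */
  uint8_t* u = y + (wd * ht);                /* U plane: (wd/2)*(ht/2) bytes  */
  uint8_t* v = u + ((wd / 2) * (ht / 2));    /* V plane: (wd/2)*(ht/2) bytes  */
  uint8_t* d = malloc((size_t)wd * ht * 3);  /* packed RGB output             */

  if (d == NULL) return -1;

  i420_to_rgb_0(y, u, v, wd, ht, d);         /* 0-degree, non-flipped case    */

  *rgb_out = d;
  return 0;
}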
DRB093-doall2-collapse-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Two-dimensional array computation: collapse(2) is used to associate two loops with omp for. The corresponding loop iteration variables are private. */ #include <omp.h> #include <stdio.h> int a[100][100]; int main() { int i; int j; #pragma omp parallel for private (i,j) for (i = 0; i <= 99; i += 1) { #pragma omp parallel for private (j) for (j = 0; j <= 99; j += 1) { a[i][j] = i; } } #pragma omp parallel for private (i,j) for (i = 0; i <= 99; i += 1) { #pragma omp parallel for private (j) for (j = 0; j <= 99; j += 1) { a[i][j] = a[i][j] + 1; } } for (i = 0; i <= 99; i += 1) { for (j = 0; j <= 99; j += 1) { printf("%d\n",a[i][j]); } } return 0; }
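The header comment of DRB093-doall2-collapse-orig-no.c describes a collapse(2) construct, while the listing above uses nested parallel-for regions over i and j. For reference, a sketch of the collapse(2) form the comment refers to is shown below; it is illustrative rather than taken from DataRaceBench, and reuses the a, i and j declarations above.

/* Illustrative collapse(2) form of the first loop nest above. */
#pragma omp parallel for collapse(2) private(i, j)
for (i = 0; i <= 99; i += 1) {
  for (j = 0; j <= 99; j += 1) {
    a[i][j] = i;
  }
}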
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 24; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,8);t1++) { lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16)); ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-2,3)),ceild(16*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(8*t1+Ny+13,24)),floord(16*t2+Ny+12,24)),floord(16*t1-16*t2+Nz+Ny+11,24));t3++) { for (t4=max(max(max(0,ceild(t1-15,16)),ceild(16*t2-Nz-124,128)),ceild(24*t3-Ny-124,128));t4<=min(min(min(min(floord(Nt+Nx-4,128),floord(8*t1+Nx+13,128)),floord(16*t2+Nx+12,128)),floord(24*t3+Nx+20,128)),floord(16*t1-16*t2+Nz+Nx+11,128));t4++) { for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),24*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),24*t3+22),128*t4+126),16*t1-16*t2+Nz+13);t5++) { for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) { lbv=max(128*t4,t5+1); ubv=min(128*t4+127,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ 
(-t5+t8) + 1]));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
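The generated loop bounds in 3d7pt_var.lbpar.c use the floord and ceild macros rather than plain integer division because C's / truncates toward zero, which computes a floor only for non-negative quotients and a ceiling only for non-positive ones; the generated bounds mix both directions and allow negative values (the t1 loop starts at -1), so explicit floor/ceil are used. A small self-contained check of the difference, for illustration only:

/* Illustration only: why floord/ceild are used instead of integer division. */
#include <assert.h>
#include <math.h>

#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))

int main(void)
{
  assert(-3 / 8 == 0);               /* C division truncates toward zero    */
  assert((int)floord(-3, 8) == -1);  /* true floor, as the tile bounds need */
  assert((int)ceild(-3, 8) == 0);    /* ceiling of -0.375                   */
  assert((int)ceild(5, 8) == 1);     /* positive operands match (5+8-1)/8   */
  return 0;
}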
Example_target_unstructured_data.1.c
/* * @@name: target-unstructured-data.1.c * @@type: C * @@compilable: yes * @@linkable: no * @@expect: success * @@version: omp_4.5 */ #include <stdlib.h> typedef struct { double *A; int N; } Matrix; void init_matrix(Matrix *mat, int n) { mat->A = (double *)malloc(n*sizeof(double)); mat->N = n; #pragma omp target enter data map(alloc:mat->A[:n]) } void free_matrix(Matrix *mat) { #pragma omp target exit data map(delete:mat->A[:mat->N]) mat->N = 0; free(mat->A); mat->A = NULL; }
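A minimal caller sketch for target-unstructured-data.1.c above, assuming the Matrix type and the init_matrix/free_matrix routines it defines: the device storage is created by the target enter data directive inside init_matrix, a target loop then fills it (map(A[:n]) finds the data already present, so no extra allocation or transfer occurs), target update copies the result back to the host, and free_matrix deletes the device copy. The main() body, the fill loop and the printed element are illustrative only.

#include <stdio.h>

/* Illustrative driver (not part of the OpenMP example file). */
int main(void)
{
  Matrix m;
  int n = 1000;

  init_matrix(&m, n);          /* host malloc + target enter data (alloc)    */

  double *A = m.A;
  #pragma omp target teams distribute parallel for map(A[:n])
  for (int i = 0; i < n; i++)
    A[i] = 2.0 * i;            /* runs on the device, writes the device copy */

  #pragma omp target update from(A[:n])      /* bring results back to host   */
  printf("A[10] = %f\n", A[10]);

  free_matrix(&m);             /* target exit data (delete) + host free      */
  return 0;
}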
ast-dump-openmp-atomic.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test(int i) { #pragma omp atomic ++i; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-atomic.c:3:1, line:6:1> line:3:6 test 'void (int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:11, col:15> col:15 used i 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:18, line:6:1> // CHECK-NEXT: `-OMPAtomicDirective {{.*}} <line:4:1, col:19> // CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3, col:5> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | |-UnaryOperator {{.*}} <col:3, col:5> 'int' prefix '++' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:5> 'int' lvalue ParmVar {{.*}} 'i' 'int' // CHECK-NEXT: | `-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-atomic.c:4:1) *const restrict' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:5:5> 'int' lvalue ParmVar {{.*}} 'i' 'int'
maxwell_TV_setup.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision: 2.15 $ ***********************************************************************EHEADER*/ /****************************************************************************** * OpenMP Problems * * Are private static arrays a problem? * ******************************************************************************/ #include "_hypre_sstruct_ls.h" #include "maxwell_TV.h" #include "par_amg.h" #define DEBUG 0 /*-------------------------------------------------------------------------- * hypre_MaxwellTV_Setup *--------------------------------------------------------------------------*/ HYPRE_Int hypre_MaxwellTV_Setup(void *maxwell_vdata, hypre_SStructMatrix *Aee_in, hypre_SStructVector *b_in, hypre_SStructVector *x_in) { hypre_MaxwellData *maxwell_TV_data = maxwell_vdata; MPI_Comm comm = hypre_SStructMatrixComm(Aee_in); hypre_SStructGraph *graph= hypre_SStructMatrixGraph(Aee_in); hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph); hypre_Index *rfactor_in= (maxwell_TV_data-> rfactor); hypre_ParCSRMatrix *T = (maxwell_TV_data-> Tgrad); hypre_SStructMatrix *Ann; HYPRE_IJMatrix Aen; hypre_SStructVector *bn; hypre_SStructVector *xn; hypre_ParCSRMatrix *Aee = hypre_SStructMatrixParCSRMatrix(Aee_in); hypre_ParCSRMatrix *T_transpose; hypre_ParCSRMatrix *transpose; hypre_ParCSRMatrix *parcsr_mat; HYPRE_Int size, *col_inds; double *values; hypre_ParVector *parvector_x; hypre_ParVector *parvector_b; hypre_ParCSRMatrix **Aen_l; void *amg_vdata; hypre_ParAMGData *amg_data; hypre_ParCSRMatrix **Ann_l; hypre_ParCSRMatrix **Pn_l; hypre_ParCSRMatrix **RnT_l; hypre_ParVector **bn_l; hypre_ParVector **xn_l; hypre_ParVector **resn_l; hypre_ParVector **en_l; hypre_ParVector **nVtemp_l; hypre_ParVector **nVtemp2_l; HYPRE_Int **nCF_marker_l; double *nrelax_weight; double *nomega; HYPRE_Int nrelax_type; HYPRE_Int node_numlevels; hypre_ParCSRMatrix **Aee_l; hypre_IJMatrix **Pe_l; hypre_IJMatrix **ReT_l; hypre_ParVector **be_l; hypre_ParVector **xe_l; hypre_ParVector **rese_l; hypre_ParVector **ee_l; hypre_ParVector **eVtemp_l; hypre_ParVector **eVtemp2_l; double *erelax_weight; double *eomega; HYPRE_Int **eCF_marker_l; HYPRE_Int erelax_type; /* objects needed to fine the edge relaxation parameters */ HYPRE_Int relax_type; /*HYPRE_Int *relax_types; void *e_amg_vdata; hypre_ParAMGData *e_amgData; HYPRE_Int numCGSweeps= 10; HYPRE_Int **amg_CF_marker; hypre_ParCSRMatrix **A_array;*/ hypre_SStructGrid *node_grid; hypre_SStructGraph *node_graph; HYPRE_Int *coarsen; hypre_SStructGrid **egrid_l; hypre_SStructGrid *edge_grid, *face_grid, *cell_grid; hypre_SStructGrid **topological_edge, **topological_face, **topological_cell; HYPRE_Int **BdryRanks_l; HYPRE_Int *BdryRanksCnts_l; hypre_SStructPGrid *pgrid; hypre_StructGrid *sgrid; hypre_BoxArray *boxes, *tmp_box_array; hypre_Box *box, *box_piece, *contract_box; hypre_BoxArray *cboxes; HYPRE_SStructVariable *vartypes, *vartype_edges, *vartype_faces, *vartype_cell; hypre_SStructStencil **Ann_stencils; hypre_MaxwellOffProcRow **OffProcRows; HYPRE_Int num_OffProcRows; hypre_Index rfactor; hypre_Index 
index, cindex, shape, loop_size, start, lindex; HYPRE_Int stencil_size; HYPRE_Int matrix_type= HYPRE_PARCSR; HYPRE_Int ndim = hypre_SStructMatrixNDim(Aee_in); HYPRE_Int nparts, part, vars, nboxes, lev_nboxes; HYPRE_Int nrows, rank, start_rank; HYPRE_Int *flag, *flag2, *inode, *ncols, *jnode; double *vals; HYPRE_Int i, j, k, l, m; hypre_BoxManager *node_boxman; hypre_BoxManEntry *entry; HYPRE_Int kstart, kend; HYPRE_Int ilower, iupper; HYPRE_Int jlower, jupper; HYPRE_Int myproc; HYPRE_Int first_local_row, last_local_row; HYPRE_Int first_local_col, last_local_col; HYPRE_Int edge_maxlevels, edge_numlevels, en_numlevels; HYPRE_Int constant_coef= maxwell_TV_data -> constant_coef; HYPRE_Int true = 1; HYPRE_Int false= 0; HYPRE_Int ierr = 0; #if DEBUG /*char filename[255];*/ #endif hypre_MPI_Comm_rank(comm, &myproc); (maxwell_TV_data -> ndim)= ndim; /* Adjust rfactor so that the correct dimension is used */ for (i= ndim; i< 3; i++) { rfactor_in[0][i]= 1; } hypre_CopyIndex(rfactor_in[0], rfactor); /*--------------------------------------------------------------------- * Set up matrices Ann, Aen. * * Forming the finest node matrix: We are assuming the Aee_in is in the * parcsr data structure, the stencil structure for the node is the * 9 or 27 point fem pattern, etc. * * Need to form the grid, graph, etc. for these matrices. *---------------------------------------------------------------------*/ nparts= hypre_SStructMatrixNParts(Aee_in); HYPRE_SStructGridCreate(comm, ndim, nparts, &node_grid); /* grids can be constructed from the cell-centre grid of Aee_in */ vartypes= hypre_CTAlloc(HYPRE_SStructVariable, 1); vartypes[0]= HYPRE_SSTRUCT_VARIABLE_NODE; for (i= 0; i< nparts; i++) { pgrid= hypre_SStructPMatrixPGrid(hypre_SStructMatrixPMatrix(Aee_in, i)); sgrid= hypre_SStructPGridCellSGrid(pgrid); boxes= hypre_StructGridBoxes(sgrid); hypre_ForBoxI(j, boxes) { box= hypre_BoxArrayBox(boxes, j); HYPRE_SStructGridSetExtents(node_grid, i, hypre_BoxIMin(box), hypre_BoxIMax(box)); } HYPRE_SStructGridSetVariables(node_grid, i, 1, vartypes); } HYPRE_SStructGridAssemble(node_grid); /* Ann stencils & graph */ stencil_size= 1; for (i= 0; i< ndim; i++) { stencil_size*= 3; } Ann_stencils= hypre_CTAlloc(hypre_SStructStencil *, 1); HYPRE_SStructStencilCreate(ndim, stencil_size, &Ann_stencils[0]); vars= 0; /* scalar equation, node-to-node */ if (ndim > 2) { kstart= -1; kend = 2; } else if (ndim == 2) { kstart= 0; kend = 1; } m= 0; for (k= kstart; k< kend; k++) { for (j= -1; j< 2; j++) { for (i= -1; i< 2; i++) { hypre_SetIndex(shape, i, j, k); HYPRE_SStructStencilSetEntry(Ann_stencils[0], m, shape, vars); m++; } } } HYPRE_SStructGraphCreate(comm, node_grid, &node_graph); for (part= 0; part< nparts; part++) { HYPRE_SStructGraphSetStencil(node_graph, part, 0, Ann_stencils[0]); } HYPRE_SStructGraphAssemble(node_graph); HYPRE_SStructMatrixCreate(comm, node_graph, &Ann); HYPRE_SStructMatrixSetObjectType(Ann, HYPRE_PARCSR); HYPRE_SStructMatrixInitialize(Ann); /* Aen is constructed as an IJ matrix. Constructing it as a sstruct_matrix * would make it a square matrix. 
*/ part= 0; i = 0; hypre_SStructGridBoxProcFindBoxManEntry(node_grid, part, 0, i, myproc, &entry); pgrid= hypre_SStructGridPGrid(node_grid, part); vartypes[0]= HYPRE_SSTRUCT_VARIABLE_NODE; j= vartypes[0]; sgrid= hypre_SStructPGridVTSGrid(pgrid, j); boxes= hypre_StructGridBoxes(sgrid); box = hypre_BoxArrayBox(boxes, 0); hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMin(box), &jlower); hypre_SStructGridBoxProcFindBoxManEntry(grid, part, 0, i, myproc, &entry); pgrid= hypre_SStructGridPGrid(grid, part); /* grab the first edge variable type */ vartypes[0]= hypre_SStructPGridVarType(pgrid, 0); j= vartypes[0]; sgrid= hypre_SStructPGridVTSGrid(pgrid, j); boxes= hypre_StructGridBoxes(sgrid); box = hypre_BoxArrayBox(boxes, 0); hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMin(box), &ilower); part = nparts-1; pgrid= hypre_SStructGridPGrid(node_grid, part); vartypes[0]= HYPRE_SSTRUCT_VARIABLE_NODE; j= vartypes[0]; sgrid= hypre_SStructPGridVTSGrid(pgrid, j); boxes= hypre_StructGridBoxes(sgrid); box = hypre_BoxArrayBox(boxes, hypre_BoxArraySize(boxes)-1); hypre_SStructGridBoxProcFindBoxManEntry(node_grid, part, 0, hypre_BoxArraySize(boxes)-1, myproc, &entry); hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMax(box), &jupper); pgrid= hypre_SStructGridPGrid(grid, part); vars = hypre_SStructPGridNVars(pgrid); vartypes[0]= hypre_SStructPGridVarType(pgrid, vars-1); j= vartypes[0]; sgrid= hypre_SStructPGridVTSGrid(pgrid, j); boxes= hypre_StructGridBoxes(sgrid); box = hypre_BoxArrayBox(boxes, hypre_BoxArraySize(boxes)-1); hypre_TFree(vartypes); hypre_SStructGridBoxProcFindBoxManEntry(grid, part, vars-1, hypre_BoxArraySize(boxes)-1, myproc, &entry); hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMax(box), &iupper); HYPRE_IJMatrixCreate(comm, ilower, iupper, jlower, jupper, &Aen); HYPRE_IJMatrixSetObjectType(Aen, HYPRE_PARCSR); HYPRE_IJMatrixInitialize(Aen); /* setup the Aen & Ann using matrix-matrix products * Aen's parscr matrix has not been formed yet-> fill up ij_matrix */ parcsr_mat= hypre_ParMatmul(Aee, T); HYPRE_ParCSRMatrixGetLocalRange((HYPRE_ParCSRMatrix) parcsr_mat, &first_local_row, &last_local_row, &first_local_col, &last_local_col); for (i= first_local_row; i<= last_local_row; i++) { HYPRE_ParCSRMatrixGetRow((HYPRE_ParCSRMatrix) parcsr_mat, i, &size, &col_inds, &values); HYPRE_IJMatrixSetValues(Aen, 1, &size, &i, (const HYPRE_Int *) col_inds, (const double *) values); HYPRE_ParCSRMatrixRestoreRow((HYPRE_ParCSRMatrix) parcsr_mat, i, &size, &col_inds, &values); } hypre_ParCSRMatrixDestroy(parcsr_mat); HYPRE_IJMatrixAssemble(Aen); /* Ann's parscr matrix has not been formed yet-> fill up ij_matrix */ hypre_ParCSRMatrixTranspose(T, &T_transpose, 1); parcsr_mat= hypre_ParMatmul(T_transpose, (hypre_ParCSRMatrix *) hypre_IJMatrixObject(Aen)); HYPRE_ParCSRMatrixGetLocalRange((HYPRE_ParCSRMatrix) parcsr_mat, &first_local_row, &last_local_row, &first_local_col, &last_local_col); for (i= first_local_row; i<= last_local_row; i++) { HYPRE_ParCSRMatrixGetRow((HYPRE_ParCSRMatrix) parcsr_mat, i, &size, &col_inds, &values); HYPRE_IJMatrixSetValues(hypre_SStructMatrixIJMatrix(Ann), 1, &size, &i, (const HYPRE_Int *) col_inds, (const double *) values); HYPRE_ParCSRMatrixRestoreRow((HYPRE_ParCSRMatrix) parcsr_mat, i, &size, &col_inds, &values); } hypre_ParCSRMatrixDestroy(parcsr_mat); /* set the physical boundary points to identity */ nrows= 0; for (part= 0; part< nparts; part++) { pgrid = hypre_SStructGridPGrid(node_grid, part); sgrid = hypre_SStructPGridSGrid(pgrid, 0); nrows+= 
hypre_StructGridLocalSize(sgrid); } flag = hypre_CTAlloc(HYPRE_Int, nrows); flag2= hypre_CTAlloc(HYPRE_Int, nrows); for (i= 0; i< nrows; i++) { flag[i]= 1; } /* Determine physical boundary points. Get the rank and set flag[rank]= rank. This will boundary point, i.e., ncols[rank]> 0 will flag a boundary point. */ start_rank= hypre_SStructGridStartRank(node_grid); for (part= 0; part< nparts; part++) { pgrid = hypre_SStructGridPGrid(node_grid, part); sgrid = hypre_SStructPGridSGrid(pgrid, 0); boxes = hypre_StructGridBoxes(sgrid); node_boxman = hypre_SStructGridBoxManager(node_grid, part, 0); hypre_ForBoxI(j, boxes) { box= hypre_BoxArrayBox(boxes, j); hypre_BoxManGetEntry(node_boxman, myproc, j, &entry); i= hypre_BoxVolume(box); tmp_box_array= hypre_BoxArrayCreate(0); ierr += hypre_BoxBoundaryG(box, sgrid, tmp_box_array); for (m= 0; m< hypre_BoxArraySize(tmp_box_array); m++) { box_piece= hypre_BoxArrayBox(tmp_box_array, m); if (hypre_BoxVolume(box_piece) < i) { hypre_BoxGetSize(box_piece, loop_size); hypre_CopyIndex(hypre_BoxIMin(box_piece), start); hypre_BoxLoop0Begin(ndim, loop_size); #if 0 /* Are private static arrays a problem? */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,lindex,index,rank) HYPRE_SMP_SCHEDULE #endif #endif hypre_BoxLoop0For() { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex(index, lindex[0], lindex[1], lindex[2]); hypre_AddIndex(index, start, index); hypre_SStructBoxManEntryGetGlobalRank(entry, index, &rank, matrix_type); flag[rank-start_rank] = 0; flag2[rank-start_rank]= rank; } hypre_BoxLoop0End(); } /* if (hypre_BoxVolume(box_piece) < i) */ } /* for (m= 0; m< hypre_BoxArraySize(tmp_box_array); m++) */ hypre_BoxArrayDestroy(tmp_box_array); } /* hypre_ForBoxI(j, boxes) */ } /* for (part= 0; part< nparts; part++) */ /* set up boundary identity */ j= 0; for (i= 0; i< nrows; i++) { if (!flag[i]) { j++; } } inode= hypre_CTAlloc(HYPRE_Int, j); ncols= hypre_CTAlloc(HYPRE_Int, j); jnode= hypre_CTAlloc(HYPRE_Int, j); vals = hypre_TAlloc(double, j); j= 0; for (i= 0; i< nrows; i++) { if (!flag[i]) { ncols[j]= 1; inode[j]= flag2[i]; jnode[j]= flag2[i]; vals[j] = 1.0; j++; } } hypre_TFree(flag); hypre_TFree(flag2); HYPRE_IJMatrixSetValues(hypre_SStructMatrixIJMatrix(Ann), j, ncols, (const HYPRE_Int*) inode, (const HYPRE_Int*) jnode, (const double*) vals); hypre_TFree(ncols); hypre_TFree(inode); hypre_TFree(jnode); hypre_TFree(vals); HYPRE_SStructMatrixAssemble(Ann); #if DEBUG HYPRE_SStructMatrixPrint("sstruct.out.Ann", Ann, 0); HYPRE_IJMatrixPrint(Aen, "driver.out.Aen"); #endif /* setup bn & xn using matvec. Assemble first and then perform matvec to get the nodal rhs and initial guess. 
*/ HYPRE_SStructVectorCreate(comm, node_grid, &bn); HYPRE_SStructVectorSetObjectType(bn, HYPRE_PARCSR); HYPRE_SStructVectorInitialize(bn); HYPRE_SStructVectorAssemble(bn); hypre_SStructVectorConvert(b_in, &parvector_x); /*HYPRE_SStructVectorGetObject((HYPRE_SStructVector) b_in, (void **) &parvector_x);*/ HYPRE_SStructVectorGetObject((HYPRE_SStructVector) bn, (void **) &parvector_b); hypre_ParCSRMatrixMatvec(1.0, T_transpose, parvector_x, 0.0, parvector_b); HYPRE_SStructVectorCreate(comm, node_grid, &xn); HYPRE_SStructVectorSetObjectType(xn, HYPRE_PARCSR); HYPRE_SStructVectorInitialize(xn); HYPRE_SStructVectorAssemble(xn); hypre_SStructVectorConvert(x_in, &parvector_x); /*HYPRE_SStructVectorGetObject((HYPRE_SStructVector) x_in, (void **) &parvector_x);*/ HYPRE_SStructVectorGetObject((HYPRE_SStructVector) xn, (void **) &parvector_b); hypre_ParCSRMatrixMatvec(1.0, T_transpose, parvector_x, 0.0, parvector_b); /* Destroy the node grid and graph. This only decrements reference counters. */ HYPRE_SStructGridDestroy(node_grid); HYPRE_SStructGraphDestroy(node_graph); /* create the multigrid components for the nodal matrix using amg. We need to extract the nodal mg components to form the system mg components. */ amg_vdata= (void *) hypre_BoomerAMGCreate(); hypre_BoomerAMGSetStrongThreshold(amg_vdata, 0.25); hypre_BoomerAMGSetup(amg_vdata, hypre_SStructMatrixParCSRMatrix(Ann), hypre_SStructVectorParVector(bn), hypre_SStructVectorParVector(xn)); { amg_data = amg_vdata; node_numlevels= hypre_ParAMGDataNumLevels(amg_data); Ann_l = hypre_CTAlloc(hypre_ParCSRMatrix *, node_numlevels); Pn_l = hypre_CTAlloc(hypre_ParCSRMatrix *, node_numlevels); RnT_l = hypre_CTAlloc(hypre_ParCSRMatrix *, node_numlevels); bn_l = hypre_CTAlloc(hypre_ParVector*, node_numlevels); xn_l = hypre_CTAlloc(hypre_ParVector*, node_numlevels); resn_l = hypre_CTAlloc(hypre_ParVector*, node_numlevels); en_l = hypre_CTAlloc(hypre_ParVector*, node_numlevels); nVtemp_l= hypre_CTAlloc(hypre_ParVector*, node_numlevels); nVtemp2_l= hypre_CTAlloc(hypre_ParVector*, node_numlevels); /* relaxation parameters */ nCF_marker_l = hypre_CTAlloc(HYPRE_Int *, node_numlevels); nrelax_weight= hypre_CTAlloc(double , node_numlevels); nomega = hypre_CTAlloc(double , node_numlevels); nrelax_type = 6; /* fast parallel hybrid */ for (i= 0; i< node_numlevels; i++) { Ann_l[i]= (hypre_ParAMGDataAArray(amg_data))[i]; Pn_l[i] = hypre_ParAMGDataPArray(amg_data)[i]; RnT_l[i]= hypre_ParAMGDataRArray(amg_data)[i]; bn_l[i] = hypre_ParAMGDataFArray(amg_data)[i]; xn_l[i] = hypre_ParAMGDataUArray(amg_data)[i]; /* create temporary vectors */ resn_l[i]= hypre_ParVectorCreate(hypre_ParCSRMatrixComm(Ann_l[i]), hypre_ParCSRMatrixGlobalNumRows(Ann_l[i]), hypre_ParCSRMatrixRowStarts(Ann_l[i])); hypre_ParVectorInitialize(resn_l[i]); hypre_ParVectorSetPartitioningOwner(resn_l[i], 0); en_l[i]= hypre_ParVectorCreate(hypre_ParCSRMatrixComm(Ann_l[i]), hypre_ParCSRMatrixGlobalNumRows(Ann_l[i]), hypre_ParCSRMatrixRowStarts(Ann_l[i])); hypre_ParVectorInitialize(en_l[i]); hypre_ParVectorSetPartitioningOwner(en_l[i], 0); nVtemp_l[i]= hypre_ParVectorCreate(hypre_ParCSRMatrixComm(Ann_l[i]), hypre_ParCSRMatrixGlobalNumRows(Ann_l[i]), hypre_ParCSRMatrixRowStarts(Ann_l[i])); hypre_ParVectorInitialize(nVtemp_l[i]); hypre_ParVectorSetPartitioningOwner(nVtemp_l[i], 0); nVtemp2_l[i]= hypre_ParVectorCreate(hypre_ParCSRMatrixComm(Ann_l[i]), hypre_ParCSRMatrixGlobalNumRows(Ann_l[i]), hypre_ParCSRMatrixRowStarts(Ann_l[i])); hypre_ParVectorInitialize(nVtemp2_l[i]); 
hypre_ParVectorSetPartitioningOwner(nVtemp2_l[i], 0); nCF_marker_l[i] = hypre_ParAMGDataCFMarkerArray(amg_data)[i]; nrelax_weight[i]= hypre_ParAMGDataRelaxWeight(amg_data)[i]; nomega[i] = hypre_ParAMGDataOmega(amg_data)[i]; } } (maxwell_TV_data -> Ann_stencils) = Ann_stencils; (maxwell_TV_data -> T_transpose) = T_transpose; (maxwell_TV_data -> Ann) = Ann; (maxwell_TV_data -> Aen) = Aen; (maxwell_TV_data -> bn) = bn; (maxwell_TV_data -> xn) = xn; (maxwell_TV_data -> amg_vdata) = amg_vdata; (maxwell_TV_data -> Ann_l) = Ann_l; (maxwell_TV_data -> Pn_l) = Pn_l; (maxwell_TV_data -> RnT_l) = RnT_l; (maxwell_TV_data -> bn_l) = bn_l; (maxwell_TV_data -> xn_l) = xn_l; (maxwell_TV_data -> resn_l) = resn_l; (maxwell_TV_data -> en_l) = en_l; (maxwell_TV_data -> nVtemp_l) = nVtemp_l; (maxwell_TV_data -> nVtemp2_l) = nVtemp2_l; (maxwell_TV_data -> nCF_marker_l) = nCF_marker_l; (maxwell_TV_data -> nrelax_weight) = nrelax_weight; (maxwell_TV_data -> nomega) = nomega; (maxwell_TV_data -> nrelax_type) = nrelax_type; (maxwell_TV_data -> node_numlevels) = node_numlevels; /* coarsen the edge matrix. Will coarsen uniformly since we have no * scheme to semi-coarsen. That is, coarsen wrt to rfactor, with * rfactor[i] > 1 for i < ndim. * Determine the number of levels for the edge problem */ cboxes= hypre_BoxArrayCreate(0); coarsen= hypre_CTAlloc(HYPRE_Int, nparts); edge_maxlevels= 0; for (part= 0; part< nparts; part++) { pgrid= hypre_SStructGridPGrid(grid, part); sgrid= hypre_SStructPGridCellSGrid(pgrid); box= hypre_BoxDuplicate(hypre_StructGridBoundingBox(sgrid)); hypre_AppendBox(box, cboxes); /* since rfactor[i]>1, the following i will be an upper bound of the number of levels. */ i = hypre_Log2(hypre_BoxSizeD(box, 0)) + 2 + hypre_Log2(hypre_BoxSizeD(box, 1)) + 2 + hypre_Log2(hypre_BoxSizeD(box, 2)) + 2; hypre_BoxDestroy(box); /* the following allows some of the parts to have volume zero grids */ edge_maxlevels= hypre_max(edge_maxlevels, i); coarsen[part] = true; } if ((maxwell_TV_data-> edge_maxlevels) > 0) { edge_maxlevels= hypre_min(edge_maxlevels, (maxwell_TV_data -> edge_maxlevels)); } (maxwell_TV_data -> edge_maxlevels)= edge_maxlevels; /* form the edge grids: coarsen the cell grid on each part and then set the boxes of these grids to be the boxes of the sstruct_grid. */ egrid_l = hypre_TAlloc(hypre_SStructGrid *, edge_maxlevels); hypre_SStructGridRef(grid, &egrid_l[0]); /* form the topological grids for the topological matrices. 
*/ /* Assuming same variable ordering on all parts */ pgrid= hypre_SStructGridPGrid(grid, 0); HYPRE_SStructGridCreate(comm, ndim, nparts, &edge_grid); vartype_edges= hypre_CTAlloc(HYPRE_SStructVariable, ndim); if (ndim > 2) { HYPRE_SStructGridCreate(comm, ndim, nparts, &face_grid); vartype_faces= hypre_CTAlloc(HYPRE_SStructVariable, ndim); for (i= 0; i< 3; i++) { vartype_edges[2]= hypre_SStructPGridVarType(pgrid, i); j= vartype_edges[2]; switch(j) { case 5: { vartype_edges[i]= HYPRE_SSTRUCT_VARIABLE_XEDGE; vartype_faces[i]= HYPRE_SSTRUCT_VARIABLE_XFACE; break; } case 6: { vartype_edges[i]= HYPRE_SSTRUCT_VARIABLE_YEDGE; vartype_faces[i]= HYPRE_SSTRUCT_VARIABLE_YFACE; break; } case 7: { vartype_edges[i]= HYPRE_SSTRUCT_VARIABLE_ZEDGE; vartype_faces[i]= HYPRE_SSTRUCT_VARIABLE_ZFACE; break; } } /* switch(j) */ } /* for (i= 0; i< 3; i++) */ } else { for (i= 0; i< 2; i++) { vartype_edges[1]= hypre_SStructPGridVarType(pgrid, i); j= vartype_edges[1]; switch(j) { case 2: { vartype_edges[i]= HYPRE_SSTRUCT_VARIABLE_XFACE; break; } case 3: { vartype_edges[i]= HYPRE_SSTRUCT_VARIABLE_YFACE; break; } } /* switch(j) */ } /* for (i= 0; i< 3; i++) */ } HYPRE_SStructGridCreate(comm, ndim, nparts, &cell_grid); vartype_cell= hypre_CTAlloc(HYPRE_SStructVariable, 1); vartype_cell[0]= HYPRE_SSTRUCT_VARIABLE_CELL; for (i= 0; i< nparts; i++) { pgrid= hypre_SStructPMatrixPGrid(hypre_SStructMatrixPMatrix(Aee_in, i)); sgrid= hypre_SStructPGridCellSGrid(pgrid); boxes= hypre_StructGridBoxes(sgrid); hypre_ForBoxI(j, boxes) { box= hypre_BoxArrayBox(boxes, j); HYPRE_SStructGridSetExtents(edge_grid, i, hypre_BoxIMin(box), hypre_BoxIMax(box)); HYPRE_SStructGridSetExtents(cell_grid, i, hypre_BoxIMin(box), hypre_BoxIMax(box)); if (ndim > 2) { HYPRE_SStructGridSetExtents(face_grid, i, hypre_BoxIMin(box), hypre_BoxIMax(box)); } } HYPRE_SStructGridSetVariables(edge_grid, i, ndim, vartype_edges); HYPRE_SStructGridSetVariables(cell_grid, i, 1, vartype_cell); if (ndim > 2) { HYPRE_SStructGridSetVariables(face_grid, i, ndim, vartype_faces); } } HYPRE_SStructGridAssemble(edge_grid); topological_edge = hypre_TAlloc(hypre_SStructGrid *, edge_maxlevels); topological_edge[0]= edge_grid; HYPRE_SStructGridAssemble(cell_grid); topological_cell = hypre_TAlloc(hypre_SStructGrid *, edge_maxlevels); topological_cell[0]= cell_grid; if (ndim > 2) { HYPRE_SStructGridAssemble(face_grid); topological_face= hypre_TAlloc(hypre_SStructGrid *, edge_maxlevels); topological_face[0]= face_grid; } /*-------------------------------------------------------------------------- * to determine when to stop coarsening, we check the cell bounding boxes * of the level egrid. After each coarsening, the bounding boxes are * replaced by the generated coarse egrid cell bounding boxes. *--------------------------------------------------------------------------*/ hypre_SetIndex(cindex, 0, 0, 0); j= 0; /* j tracks the number of parts that have been coarsened away */ edge_numlevels= 1; for (l= 0; ; l++) { HYPRE_SStructGridCreate(comm, ndim, nparts, &egrid_l[l+1]); HYPRE_SStructGridCreate(comm, ndim, nparts, &topological_edge[l+1]); HYPRE_SStructGridCreate(comm, ndim, nparts, &topological_cell[l+1]); if (ndim > 2) { HYPRE_SStructGridCreate(comm, ndim, nparts, &topological_face[l+1]); } /* coarsen the non-zero bounding boxes only if we have some. 
*/ nboxes= 0; if (j < nparts) { for (part= 0; part< nparts; part++) { pgrid= hypre_SStructGridPGrid(egrid_l[l], part); sgrid= hypre_SStructPGridCellSGrid(pgrid); if (coarsen[part]) { box= hypre_BoxArrayBox(cboxes, part); m= true; for (i= 0; i< ndim; i++) { if ( hypre_BoxIMaxD(box, i) < hypre_BoxIMinD(box, i) ) { m= false; break; } } if (m) { /* MAY NEED TO CHECK THE FOLLOWING MORE CAREFULLY: */ /* should we decrease this bounding box so that we get the correct coarse bounding box? Recall that we will decrease each box of the cell_grid so that exact rfactor divisibility is attained. Project does not automatically perform this. E.g., consider a grid with only one box whose width does not divide by rfactor, but it contains beginning and ending indices that are divisible by rfactor. Then an extra coarse grid layer is given by project. */ contract_box= hypre_BoxContraction(box, sgrid, rfactor); hypre_CopyBox(contract_box, box); hypre_BoxDestroy(contract_box); hypre_ProjectBox(box, cindex, rfactor); hypre_StructMapFineToCoarse(hypre_BoxIMin(box), cindex, rfactor, hypre_BoxIMin(box)); hypre_StructMapFineToCoarse(hypre_BoxIMax(box), cindex, rfactor, hypre_BoxIMax(box)); /* build the coarse edge grids. Only fill up box extents. The boxes of the grid may be contracted. Note that the box projection may not perform the contraction. */ k= 0; hypre_CoarsenPGrid(egrid_l[l], cindex, rfactor, part, egrid_l[l+1], &k); /* build the topological grids */ hypre_CoarsenPGrid(topological_edge[l], cindex, rfactor, part, topological_edge[l+1], &i); hypre_CoarsenPGrid(topological_cell[l], cindex, rfactor, part, topological_cell[l+1], &i); if (ndim > 2) { hypre_CoarsenPGrid(topological_face[l], cindex, rfactor, part, topological_face[l+1], &i); } nboxes+= k; } else { /* record empty, coarsened-away part */ coarsen[part]= false; /* set up a dummy box so this grid can be destroyed */ HYPRE_SStructGridSetExtents(egrid_l[l+1], part, hypre_BoxIMin(box), hypre_BoxIMin(box)); HYPRE_SStructGridSetExtents(topological_edge[l+1], part, hypre_BoxIMin(box), hypre_BoxIMin(box)); HYPRE_SStructGridSetExtents(topological_cell[l+1], part, hypre_BoxIMin(box), hypre_BoxIMin(box)); if (ndim > 2) { HYPRE_SStructGridSetExtents(topological_face[l+1], part, hypre_BoxIMin(box), hypre_BoxIMin(box)); } j++; } } /* if (coarsen[part]) */ vartypes= hypre_SStructPGridVarTypes( hypre_SStructGridPGrid(egrid_l[l], part)); HYPRE_SStructGridSetVariables(egrid_l[l+1], part, ndim, vartypes); HYPRE_SStructGridSetVariables(topological_edge[l+1], part, ndim, vartype_edges); HYPRE_SStructGridSetVariables(topological_cell[l+1], part, 1, vartype_cell); if (ndim > 2) { HYPRE_SStructGridSetVariables(topological_face[l+1], part, ndim, vartype_faces); } } /* for (part= 0; part< nparts; part++) */ } /* if (j < nparts) */ HYPRE_SStructGridAssemble(egrid_l[l+1]); HYPRE_SStructGridAssemble(topological_edge[l+1]); HYPRE_SStructGridAssemble(topological_cell[l+1]); if (ndim > 2) { HYPRE_SStructGridAssemble(topological_face[l+1]); } lev_nboxes= 0; hypre_MPI_Allreduce(&nboxes, &lev_nboxes, 1, HYPRE_MPI_INT, hypre_MPI_SUM, hypre_SStructGridComm(egrid_l[l+1])); if (lev_nboxes) /* there were coarsen boxes */ { edge_numlevels++; } else { /* no coarse boxes. Trigger coarsening completed and destroy the cgrids corresponding to this level. 
*/ j= nparts; } /* extract the cell bounding boxes */ if (j < nparts) { for (part= 0; part< nparts; part++) { if (coarsen[part]) { pgrid= hypre_SStructGridPGrid(egrid_l[l+1], part); sgrid= hypre_SStructPGridCellSGrid(pgrid); box= hypre_BoxDuplicate(hypre_StructGridBoundingBox(sgrid)); hypre_CopyBox(box, hypre_BoxArrayBox(cboxes,part)); hypre_BoxDestroy(box); } } } else { HYPRE_SStructGridDestroy(egrid_l[l+1]); HYPRE_SStructGridDestroy(topological_edge[l+1]); HYPRE_SStructGridDestroy(topological_cell[l+1]); if (ndim > 2) { HYPRE_SStructGridDestroy(topological_face[l+1]); } break; } } (maxwell_TV_data -> egrid_l)= egrid_l; hypre_Maxwell_PhysBdy(egrid_l, edge_numlevels, rfactor, &BdryRanks_l, &BdryRanksCnts_l); (maxwell_TV_data -> BdryRanks_l) = BdryRanks_l; (maxwell_TV_data -> BdryRanksCnts_l)= BdryRanksCnts_l; hypre_BoxArrayDestroy(cboxes); hypre_TFree(coarsen); /* okay to de-allocate vartypes now */ hypre_TFree(vartype_edges); hypre_TFree(vartype_cell); if (ndim > 2) { hypre_TFree(vartype_faces); } /* Aen matrices are defined for min(edge_numlevels, node_numlevels). */ en_numlevels= hypre_min(edge_numlevels, node_numlevels); (maxwell_TV_data -> en_numlevels) = en_numlevels; (maxwell_TV_data -> edge_numlevels)= edge_numlevels; Aee_l= hypre_TAlloc(hypre_ParCSRMatrix *, edge_numlevels); Aen_l= hypre_TAlloc(hypre_ParCSRMatrix *, en_numlevels); /* Pe_l are defined to be IJ matrices rather than directly parcsr. This was done so that in the topological formation, some of the ij matrix routines can be used. */ Pe_l = hypre_TAlloc(hypre_IJMatrix *, edge_numlevels-1); ReT_l = hypre_TAlloc(hypre_IJMatrix *, edge_numlevels-1); be_l = hypre_TAlloc(hypre_ParVector *, edge_numlevels); xe_l = hypre_TAlloc(hypre_ParVector *, edge_numlevels); rese_l = hypre_TAlloc(hypre_ParVector *, edge_numlevels); ee_l = hypre_TAlloc(hypre_ParVector *, edge_numlevels); eVtemp_l= hypre_TAlloc(hypre_ParVector *, edge_numlevels); eVtemp2_l= hypre_TAlloc(hypre_ParVector *, edge_numlevels); Aee_l[0]= hypre_SStructMatrixParCSRMatrix(Aee_in); Aen_l[0]=(hypre_ParCSRMatrix *) hypre_IJMatrixObject(Aen), be_l[0] = hypre_SStructVectorParVector(b_in); xe_l[0] = hypre_SStructVectorParVector(x_in); rese_l[0]= hypre_ParVectorCreate(hypre_ParCSRMatrixComm(Aee_l[0]), hypre_ParCSRMatrixGlobalNumRows(Aee_l[0]), hypre_ParCSRMatrixRowStarts(Aee_l[0])); hypre_ParVectorInitialize(rese_l[0]); hypre_ParVectorSetPartitioningOwner(rese_l[0], 0); ee_l[0]= hypre_ParVectorCreate(hypre_ParCSRMatrixComm(Aee_l[0]), hypre_ParCSRMatrixGlobalNumRows(Aee_l[0]), hypre_ParCSRMatrixRowStarts(Aee_l[0])); hypre_ParVectorInitialize(ee_l[0]); hypre_ParVectorSetPartitioningOwner(ee_l[0], 0); eVtemp_l[0]= hypre_ParVectorCreate(hypre_ParCSRMatrixComm(Aee_l[0]), hypre_ParCSRMatrixGlobalNumRows(Aee_l[0]), hypre_ParCSRMatrixRowStarts(Aee_l[0])); hypre_ParVectorInitialize(eVtemp_l[0]); hypre_ParVectorSetPartitioningOwner(eVtemp_l[0], 0); eVtemp2_l[0]= hypre_ParVectorCreate(hypre_ParCSRMatrixComm(Aee_l[0]), hypre_ParCSRMatrixGlobalNumRows(Aee_l[0]), hypre_ParCSRMatrixRowStarts(Aee_l[0])); hypre_ParVectorInitialize(eVtemp2_l[0]); hypre_ParVectorSetPartitioningOwner(eVtemp2_l[0], 0); for (l = 0; l < (en_numlevels - 1); l++) { if (l < edge_numlevels) /* create edge operators */ { if (!constant_coef) { void *PTopology_vdata; hypre_PTopology *PTopology; hypre_CreatePTopology(&PTopology_vdata); if (ndim > 2) { Pe_l[l]= hypre_Maxwell_PTopology(topological_edge[l], topological_edge[l+1], topological_face[l], topological_face[l+1], topological_cell[l], topological_cell[l+1], 
Aee_l[l], rfactor, PTopology_vdata); } else { /* two-dim case: edges= faces but stored in edge grid */ Pe_l[l]= hypre_Maxwell_PTopology(topological_edge[l], topological_edge[l+1], topological_edge[l], topological_edge[l+1], topological_cell[l], topological_cell[l+1], Aee_l[l], rfactor, PTopology_vdata); } PTopology= PTopology_vdata; /* extract off-processors rows of Pe_l[l]. Needed for amge.*/ hypre_SStructSharedDOF_ParcsrMatRowsComm(egrid_l[l], (hypre_ParCSRMatrix *) hypre_IJMatrixObject(Pe_l[l]), &num_OffProcRows, &OffProcRows); if (ndim == 3) { hypre_ND1AMGeInterpolation(Aee_l[l], (hypre_ParCSRMatrix *) hypre_IJMatrixObject(PTopology -> Element_iedge), (hypre_ParCSRMatrix *) hypre_IJMatrixObject(PTopology -> Face_iedge), (hypre_ParCSRMatrix *) hypre_IJMatrixObject(PTopology -> Edge_iedge), (hypre_ParCSRMatrix *) hypre_IJMatrixObject(PTopology -> Element_Face), (hypre_ParCSRMatrix *) hypre_IJMatrixObject(PTopology -> Element_Edge), num_OffProcRows, OffProcRows, Pe_l[l]); } else { hypre_ND1AMGeInterpolation(Aee_l[l], (hypre_ParCSRMatrix *) hypre_IJMatrixObject(PTopology -> Element_iedge), (hypre_ParCSRMatrix *) hypre_IJMatrixObject(PTopology -> Edge_iedge), (hypre_ParCSRMatrix *) hypre_IJMatrixObject(PTopology -> Edge_iedge), (hypre_ParCSRMatrix *) hypre_IJMatrixObject(PTopology -> Element_Edge), (hypre_ParCSRMatrix *) hypre_IJMatrixObject(PTopology -> Element_Edge), num_OffProcRows, OffProcRows, Pe_l[l]); } hypre_DestroyPTopology(PTopology_vdata); for (i= 0; i< num_OffProcRows; i++) { hypre_MaxwellOffProcRowDestroy((void *) OffProcRows[i]); } hypre_TFree(OffProcRows); } else { Pe_l[l]= hypre_Maxwell_PNedelec(topological_edge[l], topological_edge[l+1], rfactor); } #if DEBUG #endif ReT_l[l]= Pe_l[l]; hypre_BoomerAMGBuildCoarseOperator( (hypre_ParCSRMatrix *) hypre_IJMatrixObject(Pe_l[l]), Aee_l[l], (hypre_ParCSRMatrix *) hypre_IJMatrixObject(Pe_l[l]), &Aee_l[l+1]); /* zero off boundary points */ hypre_ParCSRMatrixEliminateRowsCols(Aee_l[l+1], BdryRanksCnts_l[l+1], BdryRanks_l[l+1]); hypre_ParCSRMatrixTranspose( (hypre_ParCSRMatrix *) hypre_IJMatrixObject(Pe_l[l]), &transpose, 1); parcsr_mat= hypre_ParMatmul(transpose, Aen_l[l]); Aen_l[l+1]= hypre_ParMatmul(parcsr_mat, Pn_l[l]); hypre_ParCSRMatrixDestroy(parcsr_mat); hypre_ParCSRMatrixDestroy(transpose); xe_l[l+1] = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(Aee_l[l+1]), hypre_ParCSRMatrixGlobalNumRows(Aee_l[l+1]), hypre_ParCSRMatrixRowStarts(Aee_l[l+1])); hypre_ParVectorInitialize(xe_l[l+1]); hypre_ParVectorSetPartitioningOwner(xe_l[l+1], 0); be_l[l+1] = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(Aee_l[l+1]), hypre_ParCSRMatrixGlobalNumRows(Aee_l[l+1]), hypre_ParCSRMatrixRowStarts(Aee_l[l+1])); hypre_ParVectorInitialize(be_l[l+1]); hypre_ParVectorSetPartitioningOwner(be_l[l+1],0); rese_l[l+1] = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(Aee_l[l+1]), hypre_ParCSRMatrixGlobalNumRows(Aee_l[l+1]), hypre_ParCSRMatrixRowStarts(Aee_l[l+1])); hypre_ParVectorInitialize(rese_l[l+1]); hypre_ParVectorSetPartitioningOwner(rese_l[l+1],0); ee_l[l+1] = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(Aee_l[l+1]), hypre_ParCSRMatrixGlobalNumRows(Aee_l[l+1]), hypre_ParCSRMatrixRowStarts(Aee_l[l+1])); hypre_ParVectorInitialize(ee_l[l+1]); hypre_ParVectorSetPartitioningOwner(ee_l[l+1],0); eVtemp_l[l+1] = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(Aee_l[l+1]), hypre_ParCSRMatrixGlobalNumRows(Aee_l[l+1]), hypre_ParCSRMatrixRowStarts(Aee_l[l+1])); hypre_ParVectorInitialize(eVtemp_l[l+1]); hypre_ParVectorSetPartitioningOwner(eVtemp_l[l+1],0); eVtemp2_l[l+1] = 
hypre_ParVectorCreate(hypre_ParCSRMatrixComm(Aee_l[l+1]), hypre_ParCSRMatrixGlobalNumRows(Aee_l[l+1]), hypre_ParCSRMatrixRowStarts(Aee_l[l+1])); hypre_ParVectorInitialize(eVtemp2_l[l+1]); hypre_ParVectorSetPartitioningOwner(eVtemp2_l[l+1],0); } /* if (l < edge_numlevels) */ } /* for (l = 0; l < (en_numlevels - 1); l++) */ /* possible to have more edge levels */ for (l = (en_numlevels-1); l < (edge_numlevels - 1); l++) { if (!constant_coef) { void *PTopology_vdata; hypre_PTopology *PTopology; hypre_CreatePTopology(&PTopology_vdata); if (ndim > 2) { Pe_l[l]= hypre_Maxwell_PTopology(topological_edge[l], topological_edge[l+1], topological_face[l], topological_face[l+1], topological_cell[l], topological_cell[l+1], Aee_l[l], rfactor, PTopology_vdata); } else { Pe_l[l]= hypre_Maxwell_PTopology(topological_edge[l], topological_edge[l+1], topological_edge[l], topological_edge[l+1], topological_cell[l], topological_cell[l+1], Aee_l[l], rfactor, PTopology_vdata); } PTopology= PTopology_vdata; /* extract off-processors rows of Pe_l[l]. Needed for amge.*/ hypre_SStructSharedDOF_ParcsrMatRowsComm(egrid_l[l], (hypre_ParCSRMatrix *) hypre_IJMatrixObject(Pe_l[l]), &num_OffProcRows, &OffProcRows); if (ndim == 3) { hypre_ND1AMGeInterpolation(Aee_l[l], (hypre_ParCSRMatrix *) hypre_IJMatrixObject(PTopology -> Element_iedge), (hypre_ParCSRMatrix *) hypre_IJMatrixObject(PTopology -> Face_iedge), (hypre_ParCSRMatrix *) hypre_IJMatrixObject(PTopology -> Edge_iedge), (hypre_ParCSRMatrix *) hypre_IJMatrixObject(PTopology -> Element_Face), (hypre_ParCSRMatrix *) hypre_IJMatrixObject(PTopology -> Element_Edge), num_OffProcRows, OffProcRows, Pe_l[l]); } else { hypre_ND1AMGeInterpolation(Aee_l[l], (hypre_ParCSRMatrix *) hypre_IJMatrixObject(PTopology -> Element_iedge), (hypre_ParCSRMatrix *) hypre_IJMatrixObject(PTopology -> Edge_iedge), (hypre_ParCSRMatrix *) hypre_IJMatrixObject(PTopology -> Edge_iedge), (hypre_ParCSRMatrix *) hypre_IJMatrixObject(PTopology -> Element_Edge), (hypre_ParCSRMatrix *) hypre_IJMatrixObject(PTopology -> Element_Edge), num_OffProcRows, OffProcRows, Pe_l[l]); } hypre_DestroyPTopology(PTopology_vdata); for (i= 0; i< num_OffProcRows; i++) { hypre_MaxwellOffProcRowDestroy((void *) OffProcRows[i]); } hypre_TFree(OffProcRows); } else { Pe_l[l]= hypre_Maxwell_PNedelec(topological_edge[l], topological_edge[l+1], rfactor); } ReT_l[l]= Pe_l[l]; hypre_BoomerAMGBuildCoarseOperator( (hypre_ParCSRMatrix *) hypre_IJMatrixObject(Pe_l[l]), Aee_l[l], (hypre_ParCSRMatrix *) hypre_IJMatrixObject(Pe_l[l]), &Aee_l[l+1]); /* zero off boundary points */ hypre_ParCSRMatrixEliminateRowsCols(Aee_l[l+1], BdryRanksCnts_l[l+1], BdryRanks_l[l+1]); xe_l[l+1] = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(Aee_l[l+1]), hypre_ParCSRMatrixGlobalNumRows(Aee_l[l+1]), hypre_ParCSRMatrixRowStarts(Aee_l[l+1])); hypre_ParVectorInitialize(xe_l[l+1]); hypre_ParVectorSetPartitioningOwner(xe_l[l+1], 0); be_l[l+1] = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(Aee_l[l+1]), hypre_ParCSRMatrixGlobalNumRows(Aee_l[l+1]), hypre_ParCSRMatrixRowStarts(Aee_l[l+1])); hypre_ParVectorInitialize(be_l[l+1]); hypre_ParVectorSetPartitioningOwner(be_l[l+1],0); ee_l[l+1] = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(Aee_l[l+1]), hypre_ParCSRMatrixGlobalNumRows(Aee_l[l+1]), hypre_ParCSRMatrixRowStarts(Aee_l[l+1])); hypre_ParVectorInitialize(ee_l[l+1]); hypre_ParVectorSetPartitioningOwner(ee_l[l+1],0); rese_l[l+1] = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(Aee_l[l+1]), hypre_ParCSRMatrixGlobalNumRows(Aee_l[l+1]), 
hypre_ParCSRMatrixRowStarts(Aee_l[l+1])); hypre_ParVectorInitialize(rese_l[l+1]); hypre_ParVectorSetPartitioningOwner(rese_l[l+1],0); eVtemp_l[l+1] = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(Aee_l[l+1]), hypre_ParCSRMatrixGlobalNumRows(Aee_l[l+1]), hypre_ParCSRMatrixRowStarts(Aee_l[l+1])); hypre_ParVectorInitialize(eVtemp_l[l+1]); hypre_ParVectorSetPartitioningOwner(eVtemp_l[l+1],0); eVtemp2_l[l+1] = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(Aee_l[l+1]), hypre_ParCSRMatrixGlobalNumRows(Aee_l[l+1]), hypre_ParCSRMatrixRowStarts(Aee_l[l+1])); hypre_ParVectorInitialize(eVtemp2_l[l+1]); hypre_ParVectorSetPartitioningOwner(eVtemp2_l[l+1],0); } /* Can delete all topological grids. Not even referenced in IJMatrices. */ for (l = 0; l < edge_numlevels; l++) { HYPRE_SStructGridDestroy(topological_edge[l]); HYPRE_SStructGridDestroy(topological_cell[l]); if (ndim > 2) { HYPRE_SStructGridDestroy(topological_face[l]); } } hypre_TFree(topological_edge); hypre_TFree(topological_cell); if (ndim > 2) { hypre_TFree(topological_face); } #if DEBUG #endif (maxwell_TV_data -> Aee_l) = Aee_l; (maxwell_TV_data -> Aen_l) = Aen_l; (maxwell_TV_data -> Pe_l) = Pe_l; (maxwell_TV_data -> ReT_l) = ReT_l; (maxwell_TV_data -> xe_l) = xe_l; (maxwell_TV_data -> be_l) = be_l; (maxwell_TV_data -> ee_l) = ee_l; (maxwell_TV_data -> rese_l) = rese_l; (maxwell_TV_data -> eVtemp_l) = eVtemp_l; (maxwell_TV_data -> eVtemp2_l)= eVtemp2_l; /*----------------------------------------------------- * Determine relaxation parameters for edge problems. * Needed for quick parallel over/under-relaxation. *-----------------------------------------------------*/ erelax_type = 2; erelax_weight= hypre_TAlloc(double, edge_numlevels); eomega = hypre_TAlloc(double, edge_numlevels); eCF_marker_l = hypre_TAlloc(HYPRE_Int *, edge_numlevels); relax_type= 6; /* SSOR */ /*for (l= 0; l< 1; l++) { erelax_weight[l]= 1.0; eCF_marker_l[l]= NULL; e_amg_vdata= (void *) hypre_BoomerAMGCreate(); e_amgData= e_amg_vdata; relax_types= hypre_CTAlloc(HYPRE_Int, 2); relax_types[1]= relax_type; amg_CF_marker= hypre_TAlloc(HYPRE_Int *, 1); A_array = hypre_TAlloc(hypre_ParCSRMatrix *, 1); amg_CF_marker[0]= NULL; A_array[0] = Aee_l[l]; (e_amgData -> CF_marker_array) = amg_CF_marker; (e_amgData -> A_array) = A_array; (e_amgData -> Vtemp ) = eVtemp_l[l]; (e_amgData -> grid_relax_type) = relax_types; (e_amgData -> smooth_num_levels) = 0; (e_amgData -> smooth_type) = 0; hypre_BoomerAMGCGRelaxWt((void *) e_amgData, 0, numCGSweeps, &eomega[l]); hypre_TFree((e_amgData -> A_array)); hypre_TFree((e_amgData -> CF_marker_array)); hypre_TFree((e_amgData -> grid_relax_type)); (e_amgData -> A_array)= NULL; (e_amgData -> Vtemp ) = NULL; (e_amgData -> CF_marker_array)= NULL; (e_amgData -> grid_relax_type)= NULL; hypre_TFree(e_amg_vdata); eomega[l]= 1.0; }*/ for (l= 0; l< edge_numlevels; l++) { erelax_weight[l]= 1.0; eomega[l]= 1.0; eCF_marker_l[l]= NULL; } (maxwell_TV_data -> erelax_type) = erelax_type; (maxwell_TV_data -> erelax_weight)= erelax_weight; (maxwell_TV_data -> eomega) = eomega; (maxwell_TV_data -> eCF_marker_l) = eCF_marker_l; /*----------------------------------------------------- * Allocate space for log info *-----------------------------------------------------*/ if ((maxwell_TV_data -> logging) > 0) { i= (maxwell_TV_data -> max_iter); (maxwell_TV_data -> norms) = hypre_TAlloc(double, i); (maxwell_TV_data -> rel_norms) = hypre_TAlloc(double, i); } return ierr; } HYPRE_Int hypre_CoarsenPGrid( hypre_SStructGrid *fgrid, hypre_Index index, hypre_Index stride, HYPRE_Int 
part, hypre_SStructGrid *cgrid, HYPRE_Int *nboxes) { HYPRE_Int ierr = 0; hypre_SStructPGrid *pgrid= hypre_SStructGridPGrid(fgrid, part); hypre_StructGrid *sgrid= hypre_SStructPGridCellSGrid(pgrid); hypre_BoxArray *boxes; hypre_Box *box, *contract_box; HYPRE_Int i; /*----------------------------------------- * Set the coarse sgrid *-----------------------------------------*/ boxes = hypre_BoxArrayDuplicate(hypre_StructGridBoxes(sgrid)); for (i = 0; i < hypre_BoxArraySize(boxes); i++) { box = hypre_BoxArrayBox(boxes, i); /* contract box so that divisible by stride */ contract_box= hypre_BoxContraction(box, sgrid, stride); hypre_ProjectBox(contract_box, index, stride); hypre_StructMapFineToCoarse(hypre_BoxIMin(contract_box), index, stride, hypre_BoxIMin(contract_box)); hypre_StructMapFineToCoarse(hypre_BoxIMax(contract_box), index, stride, hypre_BoxIMax(contract_box)); /* set box even if zero volume but don't count it */ HYPRE_SStructGridSetExtents(cgrid, part, hypre_BoxIMin(contract_box), hypre_BoxIMax(contract_box)); if ( hypre_BoxVolume(contract_box) ) { *nboxes= *nboxes+1; } hypre_BoxDestroy(contract_box); } hypre_BoxArrayDestroy(boxes); return ierr; } /*-------------------------------------------------------------------------- * Contracts a box so that the resulting box divides evenly into rfactor. * Contraction is done in the (+) or (-) direction that does not have * neighbor boxes, or if both directions have neighbor boxes, the (-) side * is contracted. * Modified to use box manager AHB 11/06 *--------------------------------------------------------------------------*/ hypre_Box * hypre_BoxContraction( hypre_Box *box, hypre_StructGrid *sgrid, hypre_Index rfactor ) { hypre_BoxManager *boxman = hypre_StructGridBoxMan(sgrid); hypre_BoxArray *neighbor_boxes= NULL; hypre_Box *nbox; hypre_Box *contracted_box; hypre_Box *shifted_box; hypre_Box intersect_box; HYPRE_Int ndim= hypre_StructGridDim(sgrid); hypre_Index remainder, box_width; HYPRE_Int i, j, k, p; HYPRE_Int npos, nneg; /* get the boxes out of the box manager - use these as the neighbor boxes */ neighbor_boxes = hypre_BoxArrayCreate(0); hypre_BoxManGetAllEntriesBoxes( boxman, neighbor_boxes); contracted_box= hypre_BoxCreate(); hypre_ClearIndex(remainder); p= 0; for (i= 0; i< ndim; i++) { j= hypre_BoxIMax(box)[i] - hypre_BoxIMin(box)[i] + 1; box_width[i]= j; k= j%rfactor[i]; if (k) { remainder[i]= k; p++; } } hypre_CopyBox(box, contracted_box); if (p) { shifted_box= hypre_BoxCreate(); for (i= 0; i< ndim; i++) { if (remainder[i]) /* non-divisible in the i'th direction */ { /* shift box in + & - directions to determine which side to contract. 
*/ hypre_CopyBox(box, shifted_box); hypre_BoxIMax(shifted_box)[i]+= box_width[i]; hypre_BoxIMin(shifted_box)[i]+= box_width[i]; npos= 0; hypre_ForBoxI(k, neighbor_boxes) { nbox= hypre_BoxArrayBox(neighbor_boxes, k); hypre_IntersectBoxes(shifted_box, nbox, &intersect_box); if (hypre_BoxVolume(&intersect_box)) { npos++; } } hypre_CopyBox(box, shifted_box); hypre_BoxIMax(shifted_box)[i]-= box_width[i]; hypre_BoxIMin(shifted_box)[i]-= box_width[i]; nneg= 0; hypre_ForBoxI(k, neighbor_boxes) { nbox= hypre_BoxArrayBox(neighbor_boxes, k); hypre_IntersectBoxes(shifted_box, nbox, &intersect_box); if (hypre_BoxVolume(&intersect_box)) { nneg++; } } if ( (npos) || ( (!npos) && (!nneg) ) ) { /* contract - direction */ hypre_BoxIMin(contracted_box)[i]+= remainder[i]; } else { if (nneg) { /* contract + direction */ hypre_BoxIMax(contracted_box)[i]-= remainder[i]; } } } /* if (remainder[i]) */ } /* for (i= 0; i< ndim; i++) */ hypre_BoxDestroy(shifted_box); } /* if (p) */ hypre_BoxArrayDestroy(neighbor_boxes); return contracted_box; }
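A worked, self-contained sketch of the per-dimension contraction rule that hypre_BoxContraction applies above; the box extents, rfactor, and neighbor counts are assumed values chosen only to illustrate the width/remainder logic, and the snippet has no hypre dependencies.

/* Standalone sketch (hypothetical values, no hypre types): shave "width % rfactor"
 * index layers off the (-) end when the (+) side has neighbor boxes (or neither
 * side does), otherwise off the (+) end, so the contracted width divides evenly. */
#include <stdio.h>

int main(void)
{
    int imin = 3, imax = 12;            /* example box extents in one dimension  */
    int rfactor = 4;                    /* coarsening factor in that dimension   */
    int npos = 1, nneg = 0;             /* assumed neighbor counts from shifting */

    int width = imax - imin + 1;        /* 10 */
    int remainder = width % rfactor;    /* 2  */

    if (remainder)
    {
        if (npos || (!npos && !nneg))
        {
            imin += remainder;          /* contract the (-) end */
        }
        else if (nneg)
        {
            imax -= remainder;          /* contract the (+) end */
        }
    }

    /* contracted box: [5, 12], width 8, now divisible by rfactor */
    printf("contracted box: [%d, %d]\n", imin, imax);
    return 0;
}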
hpdbscan.h
/* * Copyright (c) 2018 * Markus Goetz * * This software may be modified and distributed under the terms of MIT-style license. * * Description: Highly parallel DBSCAN algorithm implementation * * Maintainer: m.goetz * * Email: markus.goetz@kit.edu */ #ifndef HPDBSCAN_H #define HPDBSCAN_H #include <cmath> #include <cstdint> #include <hdf5.h> #include <iomanip> #include <omp.h> #include <stdexcept> #include <string> #include <vector> #ifdef WITH_MPI #include <mpi.h> #endif #ifdef WITH_OUTPUT #include <iostream> #endif #include "atomic.h" #include "constants.h" #include "dataset.h" #include "hdf5_util.h" #include "io.h" #include "rules.h" #include "spatial_index.h" class HPDBSCAN { float m_epsilon; size_t m_min_points; std::vector<EPSConfig> m_epsilon_overrides; std::vector<EPSConfig> m_epsilons_cache; // cache for each run of cluster #ifdef WITH_MPI int m_rank; int m_size; #endif template <typename T> Rules local_dbscan(Clusters& clusters, const SpatialIndex<T>& index) { const size_t lower = index.lower_halo_bound(); const size_t upper = index.upper_halo_bound(); Rules rules; Cell previous_cell = NOT_VISITED; std::vector<size_t> neighboring_points; // local DBSCAN run #pragma omp parallel for schedule(dynamic, 32) private(neighboring_points) firstprivate(previous_cell) reduction(merge: rules) for (size_t point = lower; point < upper; ++point) { // small optimization, we only perform a neighborhood query if it is a new cell Cell current_cell = index.cell_of(point); if (current_cell != previous_cell) { neighboring_points = index.get_neighbors(current_cell); previous_cell = current_cell; } std::vector<size_t> min_points_area; Cluster cluster_label = NOISE; if (neighboring_points.size() >= m_min_points) { // cluster_label = index.region_query(point, neighboring_points, eps2, clusters, min_points_area); cluster_label = index.region_query_multi_crit(point, neighboring_points, m_epsilons_cache, clusters, min_points_area); } if (min_points_area.size() >= m_min_points) { // set the label to be negative as to mark it as core point atomic_min(clusters.data() + point, -cluster_label); for (size_t other : min_points_area) { // get the absolute value here, we are only interested what cluster it is not in the core property Cluster other_cluster_label = std::abs(clusters[other]); // check whether the other point is a cluster if (clusters[other] < 0) { const std::pair<Cluster, Cluster> minmax = std::minmax(cluster_label, other_cluster_label); rules.update(minmax.second, minmax.first); } // mark as a border point atomic_min(clusters.data() + other, cluster_label); } } else if (clusters[point] == NOT_VISITED) { // mark as noise atomic_min(clusters.data() + point, NOISE); } } return rules; } #ifdef WITH_MPI template <typename T> void merge_halos(Clusters& clusters, Rules& rules, const SpatialIndex<T>& index) { Cuts cuts = index.compute_cuts(); // exchange the number of points in the halos int send_counts[m_size]; int recv_counts[m_size]; for (size_t i = 0; i < cuts.size(); ++i) { send_counts[i] = static_cast<int>(cuts[i].second - cuts[i].first); } MPI_Alltoall(send_counts, 1, MPI_INT, recv_counts, 1, MPI_INT, MPI_COMM_WORLD); // accumulate the numbers of points from each node int send_displs[m_size]; int recv_displs[m_size]; size_t total_items_to_receive = 0; for (int i = 0; i < m_size; ++i) { send_displs[i] = cuts[i].first; recv_displs[i] = total_items_to_receive; total_items_to_receive += static_cast<size_t>(recv_counts[i]); } // create a buffer for the incoming cluster labels and exchange them const size_t 
upper_halo_bound = index.upper_halo_bound(); const size_t lower_halo_bound = index.lower_halo_bound(); Cluster halo_labels[total_items_to_receive]; MPI_Alltoallv( clusters.data(), send_counts, send_displs, MPI_LONG, halo_labels, recv_counts, recv_displs, MPI_LONG, MPI_COMM_WORLD ); // update the local clusters with the received information for (int i = 0; i < m_size; ++i) { size_t offset = (i < m_rank ? lower_halo_bound : upper_halo_bound - recv_counts[i]); for (int j = 0; j < recv_counts[i]; ++j) { const size_t index = j + offset; const Cluster own_cluster = clusters[index]; const Cluster halo_cluster = halo_labels[j + recv_displs[i]]; // incoming cluster label is core point, update it if (own_cluster < 0) { const std::pair<Cluster, Cluster> minmax = std::minmax(std::abs(own_cluster), halo_cluster); rules.update(minmax.second, minmax.first); } else { atomic_min(&clusters[index], halo_cluster); } } } } void distribute_rules(Rules& rules) { const int number_of_rules = static_cast<int>(rules.size()); // determine how many rules each rank will send int send_counts[m_size]; int send_displs[m_size]; int recv_counts[m_size]; int recv_displs[m_size]; for (int i = 0; i < m_size; ++i) { send_counts[i] = 2 * number_of_rules; send_displs[i] = 0; } MPI_Alltoall(send_counts, 1, MPI_INT, recv_counts, 1, MPI_INT, MPI_COMM_WORLD); // ... based on that calculate the displacements into the receive buffer size_t total = 0; for (int i = 0; i < m_size; ++i) { recv_displs[i] = total; total += recv_counts[i]; } // serialize the rules Cluster serialized_rules[send_counts[m_rank]]; size_t index = 0; for (const auto& rule : rules) { serialized_rules[index++] = rule.first; serialized_rules[index++] = rule.second; } // exchange the rules and update the own rules Cluster incoming_rules[total]; MPI_Alltoallv( serialized_rules, send_counts, send_displs, MPI_LONG, incoming_rules, recv_counts, recv_displs, MPI_LONG, MPI_COMM_WORLD ); for (size_t i = 0; i < total; i += 2) { rules.update(incoming_rules[i], incoming_rules[i + 1]); } } #endif void apply_rules(Clusters& clusters, const Rules& rules) { #pragma omp parallel for for (size_t i = 0; i < clusters.size(); ++i) { const bool is_core = clusters[i] < 0; Cluster cluster = std::abs(clusters[i]); Cluster matching_rule = rules.rule(cluster); while (matching_rule < NOISE) { cluster = matching_rule; matching_rule = rules.rule(matching_rule); } clusters[i] = is_core ? -cluster : cluster; } } #ifdef WITH_OUTPUT void summarize(const Dataset& dataset, const Clusters& clusters) const { std::unordered_set<Cluster> unique_clusters; size_t cluster_points = 0; size_t core_points = 0; size_t noise_points = 0; // iterate through the points and sum up the for (size_t i = 0; i < dataset.m_chunk[0]; ++i) { const Cluster cluster = clusters[i]; unique_clusters.insert(std::abs(cluster)); if (cluster == 0) { ++noise_points; } else { ++cluster_points; } if (cluster < 0) { ++core_points; } } size_t metrics[] = {cluster_points, noise_points, core_points}; #ifdef WITH_MPI int number_of_unique_clusters = static_cast<int>(unique_clusters.size()); int set_counts[m_size]; int set_displs[m_size]; if (m_rank == 0) { #endif std::cout << "Summary..." 
<< std::endl; #ifdef WITH_MPI } MPI_Gather(&number_of_unique_clusters, 1, MPI_INT, set_counts, 1, MPI_INT, 0, MPI_COMM_WORLD); // allocate the buffers for the serialized sets Clusters global_buffer; Clusters local_buffer(number_of_unique_clusters); std::copy(unique_clusters.begin(), unique_clusters.end(), local_buffer.begin()); // sum up the total number of elements on the MPI root to determine the global buffer size size_t buffer_size = 0; if (m_rank == 0) { for (int i = 0; i < m_size; ++i) { set_displs[i] = buffer_size; buffer_size += set_counts[i]; } global_buffer.resize(buffer_size); } // collect the individual unique clusters on the MPI root into a global buffer MPI_Gatherv( local_buffer.data(), number_of_unique_clusters, MPI_LONG, global_buffer.data(), set_counts, set_displs, MPI_LONG, 0, MPI_COMM_WORLD ); // accumulate the metrics of each node MPI_Reduce( m_rank == 0 ? MPI_IN_PLACE : metrics, metrics, sizeof(metrics) / sizeof(size_t), MPI_UNSIGNED_LONG, MPI_SUM, 0, MPI_COMM_WORLD ); if (m_rank == 0) { unique_clusters.insert(global_buffer.begin(), global_buffer.end()); #endif std::cout << "\tClusters: " << (metrics[1] ? unique_clusters.size() - 1 : unique_clusters.size()) << std::endl << "\tCluster points: " << metrics[0] << std::endl << "\tNoise points: " << metrics[1] << std::endl << "\tCore points: " << metrics[2] << std::endl; #ifdef WITH_MPI } #endif } #endif public: HPDBSCAN(float epsilon, size_t min_points, std::vector<EPSConfig> epsilon_overrides = std::vector<EPSConfig>()) : m_epsilon(epsilon), m_min_points(min_points), m_epsilon_overrides(std::move(epsilon_overrides)) { #ifdef WITH_MPI MPI_Comm_rank(MPI_COMM_WORLD, &m_rank); MPI_Comm_size(MPI_COMM_WORLD, &m_size); #endif // sanitize values if (m_epsilon <= 0.0) { throw std::invalid_argument("epsilon needs to be positive"); } for (auto &&eps_cfg: m_epsilon_overrides) { if (eps_cfg.epsilon <= 0.0) { throw std::invalid_argument("epsilon needs to be positive"); } } } Clusters cluster(const std::string& path, const std::string& dataset) { return cluster(path, dataset, omp_get_max_threads()); } Clusters cluster(const std::string& path, const std::string& dataset, int threads) { // read in the data Dataset data = IO::read_hdf5(path, dataset); // determine which template to invoke H5T_class_t type_class = H5Tget_class(data.m_type); size_t precision = H5Tget_precision(data.m_type); // integer if (type_class == H5T_INTEGER) { H5T_sign_t sign = H5Tget_sign(data.m_type); // signed if (sign == H5T_SGN_2) { if (precision == 8) { return cluster<int8_t>(data, threads); } else if (precision == 16) { return cluster<int16_t>(data, threads); } else if (precision == 32) { return cluster<int32_t>(data, threads); } else if (precision == 64) { return cluster<int64_t>(data, threads); } else { throw std::invalid_argument("Unsupported signed integer precision"); } // unsigned } else { if (precision == 8) { return cluster<uint8_t>(data, threads); } else if (precision == 16) { return cluster<uint16_t>(data, threads); } else if (precision == 32) { return cluster<uint32_t>(data, threads); } else if (precision == 64) { return cluster<uint64_t>(data, threads); } else { throw std::invalid_argument("Unsupported unsigned integer precision"); } } // floating point } else if (type_class == H5T_FLOAT) { if (precision == 32) { return cluster<float>(data, threads); } else if (precision == 64) { return cluster<double>(data, threads); } else { throw std::invalid_argument("Unsupported floating point precision"); } // unsupported type } else { throw 
std::invalid_argument("Unsupported data set type"); } } template <typename T> Clusters cluster(Dataset& dataset, int threads=omp_get_max_threads()) { #ifdef WITH_OUTPUT double execution_start = omp_get_wtime(); #endif // set the number of threads omp_set_num_threads(threads); // set default number formatting #ifdef WITH_OUTPUT std::cout << std::fixed << std::setw(11) << std::setprecision(6) << std::setfill(' '); #endif // initialize the feature indexer m_epsilons_cache.clear(); // whether dimension appears in overrides std::vector<bool> visited(dataset.m_chunk[1], false); for (auto &&cfg : m_epsilon_overrides) { m_epsilons_cache.push_back(cfg); for (auto &&d : cfg.dimensions) { visited[d] = true; } } // create a group for dimensions not appearing in overrides EPSConfig residue; for (size_t i = 0; i < visited.size(); i++) { if (!visited[i]) { residue.dimensions.push_back(i); } } residue.epsilon = m_epsilon; m_epsilons_cache.push_back(residue); std::vector<float> epsilon_map(dataset.m_chunk[1], m_epsilon); for (auto &&cfg : m_epsilons_cache) { for (auto &&dim : cfg.dimensions) { epsilon_map[dim] = cfg.epsilon; } } SpatialIndex<T> index(dataset, epsilon_map); // initialize the clusters array Clusters clusters(dataset.m_chunk[0], NOT_VISITED); // run the first local clustering round #ifdef WITH_OUTPUT double start = omp_get_wtime(); #ifdef WITH_MPI if (m_rank == 0) { #endif std::cout << "Clustering..." << std::endl; std::cout << "\tDBSCAN... " << std::flush; #ifdef WITH_MPI } #endif start = omp_get_wtime(); #endif Rules rules = local_dbscan(clusters, index); #ifdef WITH_OUTPUT #ifdef WITH_MPI if (m_rank == 0) { #endif std::cout << "[OK] in " << omp_get_wtime() - start << std::endl; #ifdef WITH_MPI } #endif start = omp_get_wtime(); #endif #ifdef WITH_MPI #ifdef WITH_OUTPUT if (m_rank == 0) { std::cout << "\tMerging halos... " << std::flush; } #endif merge_halos(clusters, rules, index); distribute_rules(rules); #ifdef WITH_OUTPUT if (m_rank == 0) { std::cout << "[OK] in " << omp_get_wtime() - start << std::endl; start = omp_get_wtime(); } #endif #endif #ifdef WITH_OUTPUT #ifdef WITH_MPI if (m_rank == 0) { #endif std::cout << "\tAppyling rules... " << std::flush; #ifdef WITH_MPI } #endif #endif apply_rules(clusters, rules); #ifdef WITH_OUTPUT #ifdef WITH_MPI if (m_rank == 0) { #endif std::cout << "[OK] in " << omp_get_wtime() - start << std::endl; #ifdef WITH_MPI } #endif #endif #ifdef WITH_OUTPUT #ifdef WITH_MPI if (m_rank == 0) { #endif std::cout << "\tRecovering order... " << std::flush; start = omp_get_wtime(); #ifdef WITH_MPI } #endif #endif index.recover_initial_order(clusters); #ifdef WITH_OUTPUT #ifdef WITH_MPI if (m_rank == 0) { #endif std::cout << "[OK] in " << omp_get_wtime() - start << std::endl; #ifdef WITH_MPI } #endif start = omp_get_wtime(); #endif #ifdef WITH_OUTPUT summarize(dataset, clusters); #ifdef WITH_MPI if (m_rank == 0) { #endif std::cout << "Total time: " << omp_get_wtime() - execution_start << std::endl; #ifdef WITH_MPI } #endif #endif // return the results return clusters; } template <typename T> Clusters cluster(T* data, int dim0, int dim1, int threads) { hsize_t chunk[2] = {static_cast<hsize_t>(dim0), static_cast<hsize_t>(dim1)}; Dataset dataset(data, chunk, HDF5_Types<T>::map()); return cluster<T>(dataset, threads); } template <typename T> Clusters cluster(T* data, int dim0, int dim1) { return cluster(data, dim0, dim1, omp_get_max_threads()); } }; #endif // HPDBSCAN_H
pi_omp_naive_2.c
/*
 * This program will numerically compute the integral of 4/(1+x*x) from 0 to 1.
 * The value of this integral is pi -- which is great since it gives us an easy
 * way to check the answer.
 *
 * This is a naive parallel version of the original sequential program: each
 * thread accumulates a partial sum into its own slot of a shared array. It
 * uses the timer from the OpenMP runtime library.
 *
 * History: Written by Tim Mattson, 11/99.
 */
#include <stdio.h>
#include <omp.h>

static long num_steps = 1024 * 1024 * 1024;
double step;

int main()
{
    const int MAX_T = 16;
    int i, t, nthreads;
    double pi, sum[MAX_T];
    double start_time, run_time;

    step = 1.0 / (double) num_steps;

    for (t = 1; t <= MAX_T; t *= 2)
    {
        start_time = omp_get_wtime();
        omp_set_num_threads(t);
        pi = 0.0;

        #pragma omp parallel
        {
            int i, nt, id;
            double x;

            id = omp_get_thread_num();
            sum[id] = 0;
            nt = omp_get_num_threads();
            if (id == 0) nthreads = nt;

            /* cyclic distribution of the iterations over the nt threads */
            for (i = id; i < num_steps; i += nt)
            {
                x = (i + 0.5) * step;
                sum[id] += 4.0 / (1.0 + x * x);
            }
        }

        for (i = 0; i < nthreads; i++)
        {
            pi += sum[i];
        }
        pi = pi * step;

        run_time = omp_get_wtime() - start_time;
        printf("pi with %d threads: %.16lf in %lf seconds\n", t, pi, run_time);
    }

    return 0;
}
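The per-thread partials in sum[] above occupy adjacent array slots, so threads on different cores repeatedly invalidate each other's cache lines (false sharing), which is why this naive version scales poorly. Below is a hedged sketch of a padded variant: it is not part of the original exercise file, and the PAD value of 8 doubles assumes a 64-byte cache line.

/* Hypothetical padded version of the same integration loop (an assumption-based
 * sketch, not the original source): each thread's partial sum gets its own cache
 * line, which typically removes the false sharing on sum[]. */
#include <stdio.h>
#include <omp.h>

#define PAD         8          /* assume 64-byte cache lines (8 doubles) */
#define MAX_THREADS 16

static long num_steps = 1024 * 1024 * 1024;

int main(void)
{
    double step = 1.0 / (double) num_steps;
    double sum[MAX_THREADS][PAD];   /* one cache line per thread */
    int nthreads = 0;

    omp_set_num_threads(MAX_THREADS);
    #pragma omp parallel
    {
        int id = omp_get_thread_num();
        int nt = omp_get_num_threads();
        if (id == 0) nthreads = nt;

        sum[id][0] = 0.0;
        for (long i = id; i < num_steps; i += nt)
        {
            double x = (i + 0.5) * step;
            sum[id][0] += 4.0 / (1.0 + x * x);
        }
    }

    double pi = 0.0;
    for (int i = 0; i < nthreads; i++) pi += sum[i][0];
    pi *= step;

    printf("pi = %.16lf\n", pi);
    return 0;
}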
cryptocontext.h
/** * @file cryptocontext.h -- Control for encryption operations. * @author TPOC: palisade@njit.edu * * @section LICENSE * * Copyright (c) 2017, New Jersey Institute of Technology (NJIT) * All rights reserved. * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, this * list of conditions and the following disclaimer in the documentation and/or other * materials provided with the distribution. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SRC_DEMO_PRE_CRYPTOCONTEXT_H_ #define SRC_DEMO_PRE_CRYPTOCONTEXT_H_ #include "palisade.h" #include "encoding/plaintext.h" #include "encoding/byteplaintextencoding.h" #include "encoding/intplaintextencoding.h" #include "encoding/packedintplaintextencoding.h" #include "cryptocontexthelper.h" #include "cryptotiming.h" namespace lbcrypto { template<typename Element> class CryptoContextFactory; /** * @brief CryptoContext * * A CryptoContext is the object used to access the PALISADE library * * All PALISADE functionality is accessed by way of an instance of a CryptoContext; we say that various objects are * "created in" a context, and can only be used in the context in which they were created * * All PALISADE methods are accessed through CryptoContext methods. 
Guards are implemented to make certain that * only valid objects that have been created in the context are used * * Contexts are created using the CryptoContextFactory, and can be serialized and recovered from a serialization */ template<typename Element> class CryptoContext : public Serializable { friend class CryptoContextFactory<Element>; private: shared_ptr<LPCryptoParameters<Element>> params; /*!< crypto parameters used for this context */ shared_ptr<LPPublicKeyEncryptionScheme<Element>> scheme; /*!< algorithm used; accesses all crypto methods */ vector<shared_ptr<LPEvalKey<Element>>> evalMultKeys; /*!< cached evalmult keys */ std::map<usint, shared_ptr<LPEvalKey<Element>>> evalSumKeys; /*!< cached evalsum keys */ bool doTiming; vector<TimingInfo>* timeSamples; /** * Private methods to compare two contexts; this is only used internally and is not generally available * @param a - shared pointer in the object * @param b - this object, usually * @return true if the shared pointer is a pointer to "this" */ friend bool operator==(const CryptoContext<Element>& a, const CryptoContext<Element>& b) { if( a.params.get() != b.params.get() ) return false; if( a.evalMultKeys.size() != b.evalMultKeys.size() ) return false; for( size_t i=0; i<a.evalMultKeys.size(); i++ ) if( a.evalMultKeys[i].get() != b.evalMultKeys[i].get() ) return false; if( a.evalSumKeys.size() != b.evalSumKeys.size() ) return false; for (const auto& kp : a.evalSumKeys) { const auto& vb = b.evalSumKeys.find(kp.first); if( vb == b.evalSumKeys.end() ) return false; // key in a not in b if( *kp.second != *vb->second ) return false; // mismatch } return true; } friend bool operator!=(const CryptoContext<Element>& a, const CryptoContext<Element>& b) { return !( a == b ); } public: /** * CryptoContext constructor from pointers to parameters and scheme * @param params - pointer to CryptoParameters * @param scheme - pointer to Crypto Scheme */ CryptoContext(LPCryptoParameters<Element> *params = 0, LPPublicKeyEncryptionScheme<Element> *scheme = 0) { this->params.reset(params); this->scheme.reset(scheme); this->doTiming = false; this->timeSamples = 0; } /** * CryptoContext constructor from shared pointers to parameters and scheme * @param params - shared pointer to CryptoParameters * @param scheme - sharedpointer to Crypto Scheme */ CryptoContext(shared_ptr<LPCryptoParameters<Element>> params, shared_ptr<LPPublicKeyEncryptionScheme<Element>> scheme) { this->params = params; this->scheme = scheme; this->doTiming = false; this->timeSamples = 0; } /** * Copy constructor * @param c - source */ CryptoContext(const CryptoContext<Element>& c) { params = c.params; scheme = c.scheme; doTiming = c.doTiming; timeSamples = c.timeSamples; evalMultKeys = c.evalMultKeys; evalSumKeys = c.evalSumKeys; } /** * Assignment * @param rhs - assigning from * @return this */ CryptoContext<Element>& operator=(const CryptoContext<Element>& rhs) { params = rhs.params; scheme = rhs.scheme; doTiming = rhs.doTiming; timeSamples = rhs.timeSamples; evalMultKeys = rhs.evalMultKeys; evalSumKeys = rhs.evalSumKeys; return *this; } /** * A CryptoContext is only valid if the shared pointers are both valid */ operator bool() const { return bool(params) && bool(scheme); } /** * StartTiming method activates timing of CryptoMethods * * @param timeSamples points to a vector in which timing samples will be stored */ void StartTiming(vector<TimingInfo>* timeSamples) { this->timeSamples = timeSamples; doTiming = true; } /* * StopTiming - turns off timing */ void StopTiming() { 
doTiming = false; } /** * ResumeTiming - re-enables timing with existing TimingInfo vector */ void ResumeTiming() { doTiming = true; } /** * ResetTiming - erases measurements */ void ResetTiming() { this->timeSamples->clear(); } /** * Serialize the CryptoContext * * @param serObj - rapidJson object for the serializaion * @return true on success */ bool Serialize(Serialized* serObj) const; /** * Deserialize the context AND initialize the algorithm * * @param serObj * @return true on success */ bool Deserialize(const Serialized& serObj) { throw std::logic_error("Deserialize by using CryptoContextFactory::DeserializeAndCreateContext"); } /** * Enable a particular feature for use with this CryptoContext * @param feature - the feature that should be enabled */ void Enable(PKESchemeFeature feature) { scheme->Enable(feature); } /** * Enable several features at once * @param featureMask - bitwise or of several PKESchemeFeatures */ void Enable(usint featureMask) { scheme->Enable(featureMask); } /** * Getter for Scheme * @return scheme */ const shared_ptr<LPPublicKeyEncryptionScheme<Element>> GetEncryptionAlgorithm() const { return scheme; } /** * Getter for CryptoParams * @return params */ const shared_ptr<LPCryptoParameters<Element>> GetCryptoParameters() const { return params; } /** * Getter for element params * @return */ const shared_ptr<typename Element::Params> GetElementParams() const { return params->GetElementParams(); } /** * Get the cyclotomic order used for this context * * @return */ const usint GetCyclotomicOrder() const { return params->GetElementParams()->GetCyclotomicOrder(); } /** * Get the ring dimension used for this context * * @return */ const usint GetRingDimension() const { return params->GetElementParams()->GetRingDimension(); } /** * Get the ciphertext modulus used for this context * * @return */ const BigInteger& GetModulus() const { return params->GetElementParams()->GetModulus(); } /** * Get the ciphertext modulus used for this context * * @return */ const BigInteger& GetRootOfUnity() const { return params->GetElementParams()->GetRootOfUnity(); } /** * KeyGen generates a key pair using this algorithm's KeyGen method * @return a public/secret key pair */ LPKeyPair<Element> KeyGen() { double start = 0; if( doTiming ) start = currentDateTime(); auto r = GetEncryptionAlgorithm()->KeyGen(this, false); if( doTiming ) { timeSamples->push_back( TimingInfo(OpKeyGen, currentDateTime() - start) ); } return r; } /** * KeyGen generates a Multiparty key pair using this algorithm's KeyGen method from two keys * @param pk first public key used to coordinate the creation of later public keys. * @return a public/secret key pair */ LPKeyPair<Element> MultipartyKeyGen( const shared_ptr<LPPublicKey<Element>> pk) { double start = 0; if( doTiming ) start = currentDateTime(); auto r = GetEncryptionAlgorithm()->MultipartyKeyGen(this, pk, false); if( doTiming ) { timeSamples->push_back( TimingInfo(OpMultiPartyKeyGenKey, currentDateTime() - start) ); } return r; } /** * KeyGen generates a Multiparty key pair using a vector of secret keys * @param secretKeys a vector of the secret keys to be used for multiparty computation. 
* @return a public/secret key pair */ LPKeyPair<Element> MultipartyKeyGen( const vector<shared_ptr<LPPrivateKey<Element>>>& secretKeys) { double start = 0; if( doTiming ) start = currentDateTime(); auto r = GetEncryptionAlgorithm()->MultipartyKeyGen(this, secretKeys, false); if( doTiming ) { timeSamples->push_back( TimingInfo(OpMultiPartyKeyGenKeyvec, currentDateTime() - start) ); } return r; } /** * Lead Multiparty Decryption method for PALISADE multiparty operations. * This should be performed by exactly one of the clients. * All other clients should perform the MultipartyDecryptMain operation. * @param privateKey the secret key of the lead decryption client * @param ciphertext vector of encrypted ciphertext * @return vector of partially decrypted ciphertexts */ std::vector<shared_ptr<Ciphertext<Element>>> MultipartyDecryptLead( const shared_ptr<LPPrivateKey<Element>> privateKey, const std::vector<shared_ptr<Ciphertext<Element>>>& ciphertext) const { if( privateKey == NULL || privateKey->GetCryptoContext() != this ) throw std::logic_error("Information passed to MultipartyDecryptLead was not generated with this crypto context"); std::vector<shared_ptr<Ciphertext<Element>>> newCiphertext; double start = 0; if( doTiming ) start = currentDateTime(); for( size_t i=0; i < ciphertext.size(); i++ ) { if( ciphertext[i] == NULL || ciphertext[i]->GetCryptoContext() != this ) throw std::logic_error("One of the ciphertexts passed to MultipartyDecryptLead was not generated with this crypto context"); newCiphertext.push_back( GetEncryptionAlgorithm()->MultipartyDecryptLead(privateKey, ciphertext[i]) ); } if( doTiming ) { timeSamples->push_back( TimingInfo(OpMultiPartyDecryptLead, currentDateTime() - start) ); } return newCiphertext; } /** * Multiparty decryption method for PALISADE multiparty operations. * The lead multiparty decryption operation should be performed by exactly one of the clients. * All other clients should perform this MultipartyDecryptMain operation. * @param privateKey - for decryption * @param ciphertext - vector of encrypted ciphertext * @return vector of partially decrypted ciphertexts */ std::vector<shared_ptr<Ciphertext<Element>>> MultipartyDecryptMain( const shared_ptr<LPPrivateKey<Element>> privateKey, const std::vector<shared_ptr<Ciphertext<Element>>>& ciphertext) const { if( privateKey == NULL || privateKey->GetCryptoContext() != this ) throw std::logic_error("Information passed to MultipartyDecryptMain was not generated with this crypto context"); std::vector<shared_ptr<Ciphertext<Element>>> newCiphertext; double start = 0; if( doTiming ) start = currentDateTime(); for( size_t i=0; i < ciphertext.size(); i++ ) { if( ciphertext[i] == NULL || ciphertext[i]->GetCryptoContext() != this ) throw std::logic_error("One of the ciphertexts passed to MultipartyDecryptMain was not generated with this crypto context"); newCiphertext.push_back( GetEncryptionAlgorithm()->MultipartyDecryptMain(privateKey, ciphertext[i]) ); } if( doTiming ) { timeSamples->push_back( TimingInfo(OpMultiPartyDecryptMain, currentDateTime() - start) ); } return newCiphertext; } /** * Final multiparty decryption method to fuse the partially decrypted ciphertexts into a decrypted plaintext. * The lead multiparty decryption operation should be performed by exactly one of the clients. * All other clients should perform the MultipartyDecryptMain operation. * @param partialCiphertextVec - vector of partially decrypted ciphertexts. 
* @param plaintext - pointer to destination for the result of decryption * @param doPadding - true if input plaintext was padded; causes unpadding on last piece of ciphertext * @return size of plaintext */ DecryptResult MultipartyDecryptFusion( const std::vector<vector<shared_ptr<Ciphertext<Element>>>>& partialCiphertextVec, Plaintext *plaintext, bool doPadding = true) const { //Make sure we're processing ciphertexts. size_t last_ciphertext = partialCiphertextVec.size(); if (last_ciphertext < 1 ) return DecryptResult(); //Make sure ciphertexts are of non-zero length and that they'r eof the same length/ size_t ciphertext_size = partialCiphertextVec[0].size(); for( size_t i = 0; i < last_ciphertext; i++ ) { std::vector<shared_ptr<Ciphertext<Element>>> ciphertext = partialCiphertextVec[i]; // edge case if (ciphertext.size() == 0 || ciphertext.size() != ciphertext_size) return DecryptResult(); } double start = 0; if( doTiming ) start = currentDateTime(); size_t lastone = partialCiphertextVec[0].size() - 1; for( size_t ch = 0; ch < ciphertext_size; ch++ ) { vector<shared_ptr<Ciphertext<Element>>> ciphertextVec; for( size_t i = 0; i < last_ciphertext; i++ ) { std::vector<shared_ptr<Ciphertext<Element>>> ciphertext = partialCiphertextVec[i]; // edge case if (ciphertext[ch] == NULL || ciphertext[ch]->GetCryptoContext() != this) throw std::logic_error("A ciphertext passed to MultipartyDecryptFusion was not generated with this crypto context"); ciphertextVec.push_back(ciphertext[ch]); } Poly decrypted; DecryptResult result = GetEncryptionAlgorithm()->MultipartyDecryptFusion(ciphertextVec, &decrypted); if (result.isValid == false) return result; plaintext->Decode(ciphertextVec[0]->GetCryptoParameters()->GetPlaintextModulus(), &decrypted); if (ch == lastone && doPadding) { plaintext->Unpad(ciphertextVec[0]->GetCryptoParameters()->GetPlaintextModulus()); } } if( doTiming ) { timeSamples->push_back( TimingInfo(OpMultiPartyDecryptFusion, currentDateTime() - start) ); } return DecryptResult(plaintext->GetLength()); } /** * SparseKeyGen generates a key pair with special structure, and without full entropy, * for use in special cases like Ring Reduction * @return a public/secret key pair */ LPKeyPair<Element> SparseKeyGen() { double start = 0; if( doTiming ) start = currentDateTime(); auto r = GetEncryptionAlgorithm()->KeyGen(this, true); if( doTiming ) { timeSamples->push_back( TimingInfo(OpSparseKeyGen, currentDateTime() - start) ); } return r; } /** * ReKeyGen produces an Eval Key that PALISADE can use for Proxy Re Encryption * @param newKey (public) * @param oldKey (private) * @return new evaluation key */ shared_ptr<LPEvalKey<Element>> ReKeyGen( const shared_ptr<LPPublicKey<Element>> newKey, const shared_ptr<LPPrivateKey<Element>> oldKey) const { if( newKey == NULL || oldKey == NULL || newKey->GetCryptoContext() != this || oldKey->GetCryptoContext() != this ) throw std::logic_error("Keys passed to ReKeyGen were not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto r = GetEncryptionAlgorithm()->ReKeyGen(newKey, oldKey); if( doTiming ) { timeSamples->push_back( TimingInfo(OpReKeyGenPubPri, currentDateTime() - start) ); } return r; } /** * ReKeyGen produces an Eval Key that PALISADE can use for Proxy Re Encryption * @param newKey (private) * @param oldKey (private) * @return new evaluation key */ shared_ptr<LPEvalKey<Element>> ReKeyGen( const shared_ptr<LPPrivateKey<Element>> newKey, const shared_ptr<LPPrivateKey<Element>> oldKey) const { if (newKey == 
NULL || oldKey == NULL || newKey->GetCryptoContext() != this || oldKey->GetCryptoContext() != this) throw std::logic_error("Keys passed to ReKeyGen were not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto r = GetEncryptionAlgorithm()->ReKeyGen(newKey, oldKey); if( doTiming ) { timeSamples->push_back( TimingInfo(OpReKeyGenPriPri, currentDateTime() - start) ); } return r; } /** * EvalMultKeyGen creates a key that can be used with the PALISADE EvalMult operator * @param key * @return new evaluation key */ void EvalMultKeyGen(const shared_ptr<LPPrivateKey<Element>> key); /** * GetEvalMultKey fetches the cached eval mult keys * * @return the key to use */ const shared_ptr<LPEvalKey<Element>> GetEvalMultKey() const { if( evalMultKeys.size() != 1 ) throw std::logic_error("You need to use EvalMultKeyGen so that you have an EvalMultKey available"); return evalMultKeys[0]; } /** * SetEvalMultKeys is used by the deserializer to initialize the keyset for EvalSum * FIXME should be private? * @param evalMultKeys - new key map */ void SetEvalMultKeys(vector<shared_ptr<LPEvalKey<Element>>>& evalMultKeysNew) { evalMultKeys.clear(); this->evalMultKeys = evalMultKeysNew; } /** * KeySwitchGen creates a key that can be used with the PALISADE KeySwitch operation * @param key1 * @param key2 * @return new evaluation key */ shared_ptr<LPEvalKey<Element>> KeySwitchGen( const shared_ptr<LPPrivateKey<Element>> key1, const shared_ptr<LPPrivateKey<Element>> key2) const { if( key1 == NULL || key2 == NULL || key1->GetCryptoContext() != this || key2->GetCryptoContext() != this ) throw std::logic_error("Keys passed to KeySwitchGen were not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto r = GetEncryptionAlgorithm()->KeySwitchGen(key1, key2); if( doTiming ) { timeSamples->push_back( TimingInfo(OpKeySwitchGen, currentDateTime() - start) ); } return r; } /** * Encrypt method for PALISADE * @param publicKey - for encryption * @param plaintext - to encrypt * @param doPadding - if true, pad the input out to fill the encrypted chunk * @param doEncryption encrypts if true, embeds (encodes) the plaintext into cryptocontext if false * @return a vector of pointers to Ciphertexts created by encrypting the plaintext */ std::vector<shared_ptr<Ciphertext<Element>>> Encrypt( const shared_ptr<LPPublicKey<Element>> publicKey, const Plaintext& plaintext, bool doPadding = true, bool doEncryption = true) const { std::vector<shared_ptr<Ciphertext<Element>>> cipherResults; if( publicKey == NULL || publicKey->GetCryptoContext() != this ) throw std::logic_error("key passed to Encrypt was not generated with this crypto context"); const BigInteger& ptm = publicKey->GetCryptoParameters()->GetPlaintextModulus(); size_t chunkSize = plaintext.GetChunksize(publicKey->GetCryptoContext()->GetRingDimension(), ptm); size_t ptSize = plaintext.GetLength(); size_t rounds = ptSize / chunkSize; if (doPadding == false && ptSize%chunkSize != 0 && typeid(plaintext) == typeid(BytePlaintextEncoding)) { throw std::logic_error("Cannot Encrypt without padding with chunksize " + std::to_string(chunkSize) + " and plaintext size " + std::to_string(ptSize)); } // if there is a partial chunk OR if there isn't but we need to pad if (ptSize%chunkSize != 0 || doPadding == true) rounds += 1; double start = 0; if( doTiming ) start = currentDateTime(); for (size_t bytes = 0, i = 0; i < rounds; bytes += chunkSize, i++) { Poly 
pt(publicKey->GetCryptoParameters()->GetElementParams()); plaintext.Encode(ptm, &pt, bytes, chunkSize); shared_ptr<Ciphertext<Element>> ciphertext = GetEncryptionAlgorithm()->Encrypt(publicKey, pt, doEncryption); if (!ciphertext) { cipherResults.clear(); break; } cipherResults.push_back(ciphertext); } if( doTiming ) { timeSamples->push_back( TimingInfo(OpEncrypt, currentDateTime() - start) ); } return cipherResults; } /** * Encrypt a matrix of plaintexts (integer encoding) * @param publicKey - for encryption * @param plaintext - to encrypt * @param doEncryption encrypts if true, embeds (encodes) the plaintext into cryptocontext if false * @return a vector of pointers to Ciphertexts created by encrypting the plaintext */ shared_ptr<Matrix<RationalCiphertext<Element>>> EncryptMatrix( const shared_ptr<LPPublicKey<Element>> publicKey, const Matrix<IntPlaintextEncoding> &plaintext, bool doEncryption = true) { auto zeroAlloc = [=]() { return make_unique<RationalCiphertext<Element>>(this, true); }; shared_ptr<Matrix<RationalCiphertext<Element>>> cipherResults(new Matrix<RationalCiphertext<Element>> (zeroAlloc, plaintext.GetRows(), plaintext.GetCols())); if (publicKey == NULL || publicKey->GetCryptoContext() != this) throw std::logic_error("key passed to EncryptMatrix was not generated with this crypto context"); const BigInteger& ptm = publicKey->GetCryptoParameters()->GetPlaintextModulus(); double start = 0; if( doTiming ) start = currentDateTime(); for (size_t row = 0; row < plaintext.GetRows(); row++) { for (size_t col = 0; col < plaintext.GetCols(); col++) { Poly pt(publicKey->GetCryptoParameters()->GetElementParams()); plaintext(row,col).Encode(ptm, &pt); shared_ptr<Ciphertext<Element>> ciphertext = GetEncryptionAlgorithm()->Encrypt(publicKey, pt, doEncryption); (*cipherResults)(row, col).SetNumerator(*ciphertext); } } if( doTiming ) { timeSamples->push_back( TimingInfo(OpEncryptMatrixPlain, currentDateTime() - start) ); } return cipherResults; } /** * Encrypt a matrix of plaintexts (packed encoding) * @param publicKey - for encryption * @param plaintext - to encrypt * @param doEncryption encrypts if true, embeds (encodes) the plaintext into cryptocontext if false * @return a vector of pointers to Ciphertexts created by encrypting the plaintext */ shared_ptr<Matrix<RationalCiphertext<Element>>> EncryptMatrix( const shared_ptr<LPPublicKey<Element>> publicKey, const Matrix<PackedIntPlaintextEncoding> &plaintext, bool doEncryption = true) { auto zeroAlloc = [=]() { return make_unique<RationalCiphertext<Element>>(this, true); }; shared_ptr<Matrix<RationalCiphertext<Element>>> cipherResults(new Matrix<RationalCiphertext<Element>> (zeroAlloc, plaintext.GetRows(), plaintext.GetCols())); if (publicKey == NULL || publicKey->GetCryptoContext() != this) throw std::logic_error("key passed to EncryptMatrix was not generated with this crypto context"); const BigInteger& ptm = publicKey->GetCryptoParameters()->GetPlaintextModulus(); double start = 0; if( doTiming ) start = currentDateTime(); for (size_t row = 0; row < plaintext.GetRows(); row++) { for (size_t col = 0; col < plaintext.GetCols(); col++) { Poly pt(publicKey->GetCryptoParameters()->GetElementParams()); plaintext(row, col).Encode(ptm, &pt); shared_ptr<Ciphertext<Element>> ciphertext = GetEncryptionAlgorithm()->Encrypt(publicKey, pt, doEncryption); (*cipherResults)(row, col).SetNumerator(*ciphertext); } } if( doTiming ) { timeSamples->push_back( TimingInfo(OpEncryptMatrixPacked, currentDateTime() - start) ); } return cipherResults; } /** * 
Perform an encryption by reading plaintext from a stream, serializing each piece of ciphertext, * and writing the serializations to an output stream * @param publicKey - the encryption key in use * @param instream - where to read the input from * @param ostream - where to write the serialization to * @param doEncryption encrypts if true, embeds (encodes) the plaintext into cryptocontext if false * @return */ void EncryptStream( const shared_ptr<LPPublicKey<Element>> publicKey, std::istream& instream, std::ostream& outstream, bool doEncryption = true) const { // NOTE timing this operation is not supported if( publicKey == NULL || publicKey->GetCryptoContext() != this ) throw std::logic_error("key passed to EncryptStream was not generated with this crypto context"); bool padded = false; BytePlaintextEncoding px; const BigInteger& ptm = publicKey->GetCryptoContext()->GetCryptoParameters()->GetPlaintextModulus(); size_t chunkSize = px.GetChunksize(publicKey->GetCryptoContext()->GetRingDimension(), ptm); char *ptxt = new char[chunkSize]; while (instream.good()) { instream.read(ptxt, chunkSize); size_t nRead = instream.gcount(); if (nRead <= 0 && padded) break; BytePlaintextEncoding px(ptxt, nRead); if (nRead < chunkSize) { padded = true; } Poly pt(publicKey->GetCryptoParameters()->GetElementParams()); px.Encode(publicKey->GetCryptoParameters()->GetPlaintextModulus(), &pt, 0, chunkSize); shared_ptr<Ciphertext<Element>> ciphertext = GetEncryptionAlgorithm()->Encrypt(publicKey, pt, doEncryption); if (!ciphertext) { delete [] ptxt; return; } Serialized cS; if (ciphertext->Serialize(&cS)) { if (!SerializableHelper::SerializationToStream(cS, outstream)) { delete [] ptxt; return; } } else { delete [] ptxt; return; } } delete [] ptxt; return; } /** * Decrypt method for PALISADE * @param privateKey - for decryption * @param ciphertext - vector of encrypted ciphertext * @param plaintext - pointer to destination for the result of decryption * @param doPadding - true if input plaintext was padded; causes unpadding on last piece of ciphertext * @return size of plaintext */ DecryptResult Decrypt( const shared_ptr<LPPrivateKey<Element>> privateKey, const std::vector<shared_ptr<Ciphertext<Element>>>& ciphertext, Plaintext *plaintext, bool doPadding = true) const { // edge case if (ciphertext.size() == 0) return DecryptResult(); if( privateKey == NULL || privateKey->GetCryptoContext() != this ) throw std::logic_error("Information passed to Decrypt was not generated with this crypto context"); size_t lastone = ciphertext.size() - 1; double start = 0; if( doTiming ) start = currentDateTime(); for( size_t ch = 0; ch < ciphertext.size(); ch++ ) { if( ciphertext[ch] == NULL || ciphertext[ch]->GetCryptoContext() != this ) throw std::logic_error("A ciphertext passed to Decrypt was not generated with this crypto context"); Poly decrypted; DecryptResult result = GetEncryptionAlgorithm()->Decrypt(privateKey, ciphertext[ch], &decrypted); if (result.isValid == false) return result; plaintext->Decode(privateKey->GetCryptoParameters()->GetPlaintextModulus(), &decrypted); if (ch == lastone && doPadding) { plaintext->Unpad(privateKey->GetCryptoParameters()->GetPlaintextModulus()); } } if( doTiming ) { timeSamples->push_back( TimingInfo(OpDecrypt, currentDateTime() - start) ); } return DecryptResult(plaintext->GetLength()); } /** * Decrypt method for a matrix of ciphertexts (integer encoding) * @param privateKey - for decryption * @param ciphertext - matrix of encrypted ciphertexts * @param plaintext - pointer to the 
destination martrix of plaintexts * @return size of plaintext */ DecryptResult DecryptMatrix( const shared_ptr<LPPrivateKey<Element>> privateKey, const shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext, Matrix<IntPlaintextEncoding> *numerator, Matrix<IntPlaintextEncoding> *denominator) const { // edge case if ((ciphertext->GetCols()== 0) && (ciphertext->GetRows() == 0)) return DecryptResult(); if ((ciphertext->GetCols() != numerator->GetCols())|| (ciphertext->GetRows() != numerator->GetRows()) || (ciphertext->GetCols() != denominator->GetCols()) || (ciphertext->GetRows() != denominator->GetRows())) throw std::runtime_error("Ciphertext and plaintext matrices have different dimensions"); if (privateKey == NULL || privateKey->GetCryptoContext() != this) throw std::runtime_error("Information passed to DecryptMatrix was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); for (size_t row = 0; row < ciphertext->GetRows(); row++) { for (size_t col = 0; col < ciphertext->GetCols(); col++) { if ((*ciphertext)(row, col).GetCryptoContext() != this) throw std::runtime_error("A ciphertext passed to DecryptMatrix was not generated with this crypto context"); const shared_ptr<Ciphertext<Element>> ctN = (*ciphertext)(row, col).GetNumerator(); Poly decryptedNumerator; DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt(privateKey, ctN, &decryptedNumerator); if (resultN.isValid == false) return resultN; (*numerator)(row,col).Decode(privateKey->GetCryptoParameters()->GetPlaintextModulus(), &decryptedNumerator); Poly decryptedDenominator; if( (*ciphertext)(row,col).GetIntegerFlag() == true ) { decryptedDenominator = decryptedNumerator.CloneParametersOnly(); decryptedDenominator.SetValuesToZero(); decryptedDenominator.SetValAtIndex(0,1); } else { const shared_ptr<Ciphertext<Element>> ctD = (*ciphertext)(row, col).GetDenominator(); DecryptResult resultD = GetEncryptionAlgorithm()->Decrypt(privateKey, ctD, &decryptedDenominator); if (resultD.isValid == false) return resultD; } (*denominator)(row, col).Decode(privateKey->GetCryptoParameters()->GetPlaintextModulus(), &decryptedDenominator); } } if( doTiming ) { timeSamples->push_back( TimingInfo(OpDecryptMatrixPlain, currentDateTime() - start) ); } return DecryptResult((*numerator)(numerator->GetRows()-1,numerator->GetCols()-1).GetLength()); } /** * Decrypt method for a matrix of ciphertexts (packed encoding) * @param privateKey - for decryption * @param ciphertext - matrix of encrypted ciphertexts * @param plaintext - pointer to the destination martrix of plaintexts * @return size of plaintext */ DecryptResult DecryptMatrix( const shared_ptr<LPPrivateKey<Element>> privateKey, const shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext, Matrix<PackedIntPlaintextEncoding> *numerator, Matrix<PackedIntPlaintextEncoding> *denominator) const { // edge case if ((ciphertext->GetCols() == 0) && (ciphertext->GetRows() == 0)) return DecryptResult(); if ((ciphertext->GetCols() != numerator->GetCols()) || (ciphertext->GetRows() != numerator->GetRows()) || (ciphertext->GetCols() != denominator->GetCols()) || (ciphertext->GetRows() != denominator->GetRows())) throw std::runtime_error("Ciphertext and plaintext matrices have different dimensions"); if (privateKey == NULL || privateKey->GetCryptoContext() != this) throw std::runtime_error("Information passed to DecryptMatrix was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); for (size_t row = 0; row < 
ciphertext->GetRows(); row++) { for (size_t col = 0; col < ciphertext->GetCols(); col++) { if ((*ciphertext)(row, col).GetCryptoContext() != this) throw std::runtime_error("A ciphertext passed to DecryptMatrix was not generated with this crypto context"); const shared_ptr<Ciphertext<Element>> ctN = (*ciphertext)(row, col).GetNumerator(); Poly decryptedNumerator; DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt(privateKey, ctN, &decryptedNumerator); if (resultN.isValid == false) return resultN; (*numerator)(row, col).Decode(privateKey->GetCryptoParameters()->GetPlaintextModulus(), &decryptedNumerator); const shared_ptr<Ciphertext<Element>> ctD = (*ciphertext)(row, col).GetDenominator(); Poly decryptedDenominator; DecryptResult resultD = GetEncryptionAlgorithm()->Decrypt(privateKey, ctD, &decryptedDenominator); if (resultD.isValid == false) return resultD; (*denominator)(row, col).Decode(privateKey->GetCryptoParameters()->GetPlaintextModulus(), &decryptedDenominator); } } if( doTiming ) { timeSamples->push_back( TimingInfo(OpDecryptMatrixPacked, currentDateTime() - start) ); } return DecryptResult((*numerator)(numerator->GetRows() - 1, numerator->GetCols() - 1).GetLength()); } /** * Decrypt method for numerators in a matrix of ciphertexts (packed encoding) * @param privateKey - for decryption * @param ciphertext - matrix of encrypted ciphertexts * @param plaintext - pointer to the destination martrix of plaintexts * @return size of plaintext */ DecryptResult DecryptMatrixNumerator( const shared_ptr<LPPrivateKey<Element>> privateKey, const shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext, Matrix<PackedIntPlaintextEncoding> *numerator) const { // edge case if ((ciphertext->GetCols() == 0) && (ciphertext->GetRows() == 0)) return DecryptResult(); if ((ciphertext->GetCols() != numerator->GetCols()) || (ciphertext->GetRows() != numerator->GetRows())) throw std::runtime_error("Ciphertext and plaintext matrices have different dimensions"); if (privateKey == NULL || privateKey->GetCryptoContext() != this) throw std::runtime_error("Information passed to DecryptMatrix was not generated with this crypto context"); double start = 0; if (doTiming) start = currentDateTime(); //force all precomputations to take place in advance if ((*ciphertext)(0, 0).GetCryptoContext() != this) throw std::runtime_error("A ciphertext passed to DecryptMatrix was not generated with this crypto context"); const shared_ptr<Ciphertext<Element>> ctN = (*ciphertext)(0, 0).GetNumerator(); Poly decryptedNumerator; //DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt(privateKey, ctN, &decryptedNumerator); GetEncryptionAlgorithm()->Decrypt(privateKey, ctN, &decryptedNumerator); //if (resultN.isValid == false) return resultN; (*numerator)(0, 0).Decode(privateKey->GetCryptoParameters()->GetPlaintextModulus(), &decryptedNumerator); for (size_t row = 0; row < ciphertext->GetRows(); row++) { #pragma omp parallel for for (size_t col = 0; col < ciphertext->GetCols(); col++) { if (row + col > 0) { if ((*ciphertext)(row, col).GetCryptoContext() != this) throw std::runtime_error("A ciphertext passed to DecryptMatrix was not generated with this crypto context"); const shared_ptr<Ciphertext<Element>> ctN = (*ciphertext)(row, col).GetNumerator(); Poly decryptedNumerator; //DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt(privateKey, ctN, &decryptedNumerator); GetEncryptionAlgorithm()->Decrypt(privateKey, ctN, &decryptedNumerator); //if (resultN.isValid == false) return resultN; (*numerator)(row, 
col).Decode(privateKey->GetCryptoParameters()->GetPlaintextModulus(), &decryptedNumerator); } } } if (doTiming) { timeSamples->push_back(TimingInfo(OpDecryptMatrixPacked, currentDateTime() - start)); } return DecryptResult((*numerator)(numerator->GetRows() - 1, numerator->GetCols() - 1).GetLength()); } /** * read instream for a sequence of serialized ciphertext; deserialize it, decrypt it, and write it to outstream * @param privateKey - reference to the decryption key * @param instream - input stream with sequence of serialized ciphertexts * @param outstream - output stream for plaintext * @return */ void DecryptStream( const shared_ptr<LPPrivateKey<Element>> privateKey, std::istream& instream, std::ostream& outstream) { // NOTE timing this operation is not supported if( privateKey == NULL || privateKey->GetCryptoContext() != this ) throw std::logic_error("Information passed to DecryptStream was not generated with this crypto context"); Serialized serObj; size_t tot = 0; bool firstTime = true; BytePlaintextEncoding pte[2]; bool whichArray = false; while( SerializableHelper::StreamToSerialization(instream, &serObj) ) { shared_ptr<Ciphertext<Element>> ct; if( (ct = deserializeCiphertext(serObj)) != NULL ) { Poly decrypted; DecryptResult res = GetEncryptionAlgorithm()->Decrypt(privateKey, ct, &decrypted); if( !res.isValid ) return; tot += res.messageLength; pte[whichArray].Decode(privateKey->GetCryptoParameters()->GetPlaintextModulus(), &decrypted); if( !firstTime ) { outstream << pte[!whichArray]; pte[!whichArray].clear(); } firstTime = false; whichArray = !whichArray; } else return; } // unpad and write the last one pte[!whichArray].Unpad(privateKey->GetCryptoParameters()->GetPlaintextModulus()); outstream << pte[!whichArray]; return; } /** * ReEncrypt - Proxy Re Encryption mechanism for PALISADE * @param evalKey - evaluation key from the PRE keygen method * @param ciphertext - vector of shared pointers to encrypted Ciphertext * @return vector of shared pointers to re-encrypted ciphertexts */ std::vector<shared_ptr<Ciphertext<Element>>> ReEncrypt( shared_ptr<LPEvalKey<Element>> evalKey, std::vector<shared_ptr<Ciphertext<Element>>>& ciphertext) const { if( evalKey == NULL || evalKey->GetCryptoContext() != this ) throw std::logic_error("Information passed to ReEncrypt was not generated with this crypto context"); std::vector<shared_ptr<Ciphertext<Element>>> newCiphertext; double start = 0; if( doTiming ) start = currentDateTime(); for( size_t i=0; i < ciphertext.size(); i++ ) { if( ciphertext[i] == NULL || ciphertext[i]->GetCryptoContext() != this ) throw std::logic_error("One of the ciphertexts passed to ReEncrypt was not generated with this crypto context"); newCiphertext.push_back( GetEncryptionAlgorithm()->ReEncrypt(evalKey, ciphertext[i]) ); } if( doTiming ) { timeSamples->push_back( TimingInfo(OpReEncrypt, currentDateTime() - start) ); } return newCiphertext; } /** * read instream for a serialized ciphertext. 
deserialize, re-encrypt, serialize, and write to outstream * @param evalKey - reference to the re-encryption key * @param instream - input stream with sequence of serialized ciphertext * @param outstream - output stream with sequence of serialized re-encrypted ciphertext */ void ReEncryptStream( const shared_ptr<LPEvalKey<Element>> evalKey, std::istream& instream, std::ostream& outstream) { // NOTE timing this operation is not supported if( evalKey == NULL || evalKey->GetCryptoContext() != this ) throw std::logic_error("Information passed to ReEncryptStream was not generated with this crypto context"); Serialized serObj; while( SerializableHelper::StreamToSerialization(instream, &serObj) ) { shared_ptr<Ciphertext<Element>> ct; ct = deserializeCiphertext(serObj); if( ct ) { std::vector<shared_ptr<Ciphertext<Element>>> allCt; allCt.push_back(ct); std::vector<shared_ptr<Ciphertext<Element>>> reCt = ReEncrypt(evalKey, allCt); Serialized serReObj; if( reCt[0]->Serialize(&serReObj) ) { SerializableHelper::SerializationToStream(serReObj, outstream); } else { return; } allCt.clear(); } else { return; } } } /** * EvalAdd - PALISADE EvalAdd method for a pair of ciphertexts * @param ct1 * @param ct2 * @return new ciphertext for ct1 + ct2 */ shared_ptr<Ciphertext<Element>> EvalAdd(const shared_ptr<Ciphertext<Element>> ct1, const shared_ptr<Ciphertext<Element>> ct2) const { if( ct1 == NULL || ct2 == NULL || ct1->GetCryptoContext() != this || ct2->GetCryptoContext() != this ) throw std::logic_error("Information passed to EvalAdd was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalAdd(ct1, ct2); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalAdd, currentDateTime() - start) ); } return rv; } shared_ptr<Matrix<RationalCiphertext<Element>>> EvalAddMatrix(const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1, const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const { // tests needed for context double start = 0; if( doTiming ) start = currentDateTime(); Matrix<RationalCiphertext<Element>> rv = *ct1 + *ct2; if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalAddMatrix, currentDateTime() - start) ); } shared_ptr<Matrix<RationalCiphertext<Element>>> a(new Matrix<RationalCiphertext<Element>>(rv)); return a; } /** * EvalSub - PALISADE EvalSub method for a pair of ciphertexts * @param ct1 * @param ct2 * @return new ciphertext for ct1 - ct2 */ shared_ptr<Ciphertext<Element>> EvalSub(const shared_ptr<Ciphertext<Element>> ct1, const shared_ptr<Ciphertext<Element>> ct2) const { if( ct1 == NULL || ct2 == NULL || ct1->GetCryptoContext() != this || ct2->GetCryptoContext() != this ) throw std::logic_error("Information passed to EvalSub was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalSub(ct1, ct2); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalSub, currentDateTime() - start) ); } return rv; } shared_ptr<Matrix<RationalCiphertext<Element>>> EvalSubMatrix(const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1, const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const { // tests needed for context double start = 0; if( doTiming ) start = currentDateTime(); Matrix<RationalCiphertext<Element>> rv = *ct1 - *ct2; if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalSubMatrix, currentDateTime() - start) ); } shared_ptr<Matrix<RationalCiphertext<Element>>> a(new 
Matrix<RationalCiphertext<Element>>(rv)); return a; } /** * EvalAddPLain - PALISADE EvalAdd method for a ciphertext and plaintext * @param ciphertext * @param plaintext * @return new ciphertext for ciphertext + plaintext */ shared_ptr<Ciphertext<Element>> EvalAddPlain(const shared_ptr<Ciphertext<Element>> ciphertext, const shared_ptr<Ciphertext<Element>> plaintext) const { double start = 0; if( doTiming ) start = currentDateTime(); auto rv = EvalAdd(ciphertext, plaintext); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalAddPlain, currentDateTime() - start) ); } return rv; } /** * EvalSubPlain - PALISADE EvalSub method for a ciphertext and plaintext * @param ciphertext * @param plaintext * @return new ciphertext for ciphertext - plaintext */ shared_ptr<Ciphertext<Element>> EvalSubPlain(const shared_ptr<Ciphertext<Element>> ciphertext, const shared_ptr<Ciphertext<Element>> plaintext) const { double start = 0; if( doTiming ) start = currentDateTime(); auto rv = EvalSub(ciphertext, plaintext); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalSubPlain, currentDateTime() - start) ); } return rv; } /** * EvalMult - PALISADE EvalMult method for a pair of ciphertexts * @param ct1 * @param ct2 * @return new ciphertext for ct1 * ct2 */ shared_ptr<Ciphertext<Element>> EvalMult(const shared_ptr<Ciphertext<Element>> ct1, const shared_ptr<Ciphertext<Element>> ct2) const { if( ct1 == NULL || ct2 == NULL || ct1->GetCryptoContext() != this || ct2->GetCryptoContext() != this ) throw std::logic_error("Information passed to EvalMult was not generated with this crypto context"); auto ek = GetEvalMultKey(); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalMult(ct1, ct2, ek); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalMult, currentDateTime() - start) ); } return rv; } shared_ptr<Matrix<RationalCiphertext<Element>>> EvalMultMatrix(const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1, const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const { // tests needed for context double start = 0; if( doTiming ) start = currentDateTime(); Matrix<RationalCiphertext<Element>> rv = *ct1 * *ct2; if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalMultMatrix, currentDateTime() - start) ); } shared_ptr<Matrix<RationalCiphertext<Element>>> a(new Matrix<RationalCiphertext<Element>>(rv)); return a; } /** * EvalMult - PALISADE EvalMult method for a a multiplication of ciphertext by plaintext * @param ct1 * @param ct2 * @return new ciphertext for ct1 * ct2 */ shared_ptr<Ciphertext<Element>> EvalMultPlain(const shared_ptr<Ciphertext<Element>> ciphertext, const shared_ptr<Ciphertext<Element>> plaintext) const { if (ciphertext == NULL || plaintext == NULL || ciphertext->GetCryptoContext() != this || plaintext->GetCryptoContext() != this) throw std::logic_error("Information passed to EvalMult was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalMultPlain(ciphertext, plaintext); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalMultPlain, currentDateTime() - start) ); } return rv; } /** * EvalMult - PALISADE EvalMult method for a pair of ciphertexts, followed by recrypt with given key * @param ct1 * @param ct2 * @param ek * @return new ciphertext for ct1 * ct2, recrypted with ek */ shared_ptr<Ciphertext<Element>> EvalMult(const shared_ptr<Ciphertext<Element>> ct1, const shared_ptr<Ciphertext<Element>> ct2, const 
shared_ptr<LPEvalKey<Element>> ek) const { if( ct1 == NULL || ct2 == NULL || ek == NULL || ct1->GetCryptoContext() != this || ct2->GetCryptoContext() != this || ek->GetCryptoContext() != this ) throw std::logic_error("Information passed to EvalMult was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalMult(ct1, ct2, ek); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalMultKey, currentDateTime() - start) ); } return rv; } /** * EvalSub - PALISADE Negate method for a ciphertext * @param ct * @return new ciphertext -ct */ shared_ptr<Ciphertext<Element>> EvalNegate(const shared_ptr<Ciphertext<Element>> ct) const { if (ct == NULL || ct->GetCryptoContext() != this) throw std::logic_error("Information passed to EvalNegate was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalNegate(ct); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalNeg, currentDateTime() - start) ); } return rv; } /** * EvalSub - PALISADE Negate method for a ciphertext * @param ct * @return new ciphertext -ct */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalNegateMatrix(const shared_ptr<Matrix<RationalCiphertext<Element>>> ct) const { double start = 0; if( doTiming ) start = currentDateTime(); shared_ptr<Matrix<RationalCiphertext<Element>>> m( new Matrix<RationalCiphertext<Element>>(ct->GetAllocator(), ct->GetRows(), ct->GetCols())); for( size_t r = 0; r < m->GetRows(); r++ ) for( size_t c = 0; c < m->GetCols(); c++ ) (*m)(r,c) = -((*ct)(r,c)); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalNegMatrix, currentDateTime() - start) ); } return m; } /** * Generate automophism keys for a given private key * * @param publicKey original public key. * @param origPrivateKey original private key. * @param indexList list of automorphism indices to be computed * @return returns the evaluation keys; index 0 of the vector corresponds to plaintext index 2, index 1 to plaintex index 3, etc. */ shared_ptr<std::map<usint, shared_ptr<LPEvalKey<Element>>>> EvalAutomorphismKeyGen(const shared_ptr<LPPublicKey<Element>> publicKey, const shared_ptr<LPPrivateKey<Element>> origPrivateKey, const std::vector<usint> &indexList) const { //need to add exception handling double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalAutomorphismKeyGen(publicKey, origPrivateKey, indexList); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalAutomorphismKeyGen, currentDateTime() - start) ); } return rv; } /** * Function for evaluating automorphism of ciphertext at index i * * @param ciphertext the input ciphertext. * @param i automorphism index * @param &evalKeys - reference to the vector of evaluation keys generated by EvalAutomorphismKeyGen. * @return resulting ciphertext */ shared_ptr<Ciphertext<Element>> EvalAutomorphism(const shared_ptr<Ciphertext<Element>> ciphertext, usint i, const std::map<usint, shared_ptr<LPEvalKey<Element>>> &evalKeys) const { //need to add exception handling double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalAutomorphism(ciphertext, i, evalKeys); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalAutomorphismI, currentDateTime() - start) ); } return rv; } /** * Generate automophism keys for a given private key; Uses the private key for encryption * * @param privateKey private key. 
* @param indexList list of automorphism indices to be computed * @return returns the evaluation keys */ shared_ptr<std::map<usint, shared_ptr<LPEvalKey<Element>>>> EvalAutomorphismKeyGen(const shared_ptr<LPPrivateKey<Element>> privateKey, const std::vector<usint> &indexList) const { //need to add exception handling double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalAutomorphismKeyGen(privateKey, indexList); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalAutomorphismK, currentDateTime() - start) ); } return rv; } /** * EvalSumKeyGen Generates the key map to be used by evalsum * * @param privateKey private key. * @param publicKey public key (used in NTRU schemes). */ void EvalSumKeyGen( const shared_ptr<LPPrivateKey<Element>> privateKey, const shared_ptr<LPPublicKey<Element>> publicKey = nullptr); /** * GetEvalSumKey returns the map * * @return the EvalSum key map */ const std::map<usint, shared_ptr<LPEvalKey<Element>>>& GetEvalSumKey() const; /** * SetEvalSumKeys - used by deserializer to set the keys for EvalSum * FIXME should be private? * @param evalSumKeys - new key map */ void SetEvalSumKeys(std::map<usint, shared_ptr<LPEvalKey<Element>>>& evalSumKeys) { this->evalSumKeys.clear(); this->evalSumKeys = evalSumKeys; } /** * Function for evaluating a sum of all components * * @param ciphertext the input ciphertext. * @param batchSize size of the batch * @return resulting ciphertext */ shared_ptr<Ciphertext<Element>> EvalSum(const shared_ptr<Ciphertext<Element>> ciphertext, usint batchSize) const; /** * Evaluates inner product in batched encoding * * @param ciphertext1 first vector. * @param ciphertext2 second vector. * @param batchSize size of the batch to be summed up * @return resulting ciphertext */ shared_ptr<Ciphertext<Element>> EvalInnerProduct(const shared_ptr<Ciphertext<Element>> ciphertext1, const shared_ptr<Ciphertext<Element>> ciphertext2, usint batchSize) const; /** * EvalCrossCorrelation - Computes the sliding sum of inner products (known as * as cross-correlation, sliding inner product, or sliding dot product in * image processing * @param x - first vector of row vectors * @param y - second vector of row vectors * @param batchSize - batch size for packed encoding * @param indexStart - starting index in the vectors of row vectors * @param length - length of the slice in the vectors of row vectors; default is 0 meaning to use the full length of the vector * @return sum(x_i*y_i), i.e., a sum of inner products */ shared_ptr<Ciphertext<Element>> EvalCrossCorrelation(const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize, usint indexStart = 0, usint length = 0) const; /** * EvalLinRegressBatched- Computes the parameter vector for linear regression using the least squares method * Supported only in batched mode; currently works only for two regressors * @param x - matrix of regressors * @param y - vector of dependent variables * @return the parameter vector using (x^T x)^{-1} x^T y (using least squares method) */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalLinRegressBatched(const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize) const; /** * EvalLinRegression - Computes the parameter vector for linear regression using the least squares method * @param x - matrix of regressors * @param y - vector of dependent variables * @return the parameter vector using (x^T x)^{-1} 
x^T y (using least squares method) */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalLinRegression(const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y) const { //if (ct1 == NULL || ct2 == NULL || ct1->GetCryptoContext() != this || ct2->GetCryptoContext() != this) // throw std::logic_error("Information passed to EvalMult was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalLinRegression(x, y); if( doTiming ) { timeSamples->push_back( TimingInfo(OpLinRegression, currentDateTime() - start) ); } return rv; } /** * KeySwitch - PALISADE KeySwitch method * @param keySwitchHint - reference to KeySwitchHint * @param ciphertext - vector of ciphertext * @return new Ciphertext after applying key switch */ shared_ptr<Ciphertext<Element>> KeySwitch( const shared_ptr<LPEvalKey<Element>> keySwitchHint, const shared_ptr<Ciphertext<Element>> ciphertext) const { if( keySwitchHint == NULL || keySwitchHint->GetCryptoContext() != this ) throw std::logic_error("Key passed to KeySwitch was not generated with this crypto context"); if( ciphertext == NULL || ciphertext->GetCryptoContext() != this ) throw std::logic_error("Ciphertext passed to KeySwitch was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->KeySwitch(keySwitchHint, ciphertext); if( doTiming ) { timeSamples->push_back( TimingInfo(OpKeySwitch, currentDateTime() - start) ); } return rv; } /** * ModReduce - PALISADE ModReduce method * @param ciphertext - vector of ciphertext * @return vector of mod reduced ciphertext */ shared_ptr<Ciphertext<Element>> ModReduce(shared_ptr<Ciphertext<Element>> ciphertext) const { if( ciphertext == NULL || ciphertext->GetCryptoContext() != this ) throw std::logic_error("Information passed to ModReduce was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->ModReduce(ciphertext); if( doTiming ) { timeSamples->push_back( TimingInfo(OpModReduce, currentDateTime() - start) ); } return rv; } /** * ModReduce - PALISADE ModReduce method * @param ciphertext - vector of ciphertext * @return vector of mod reduced ciphertext */ RationalCiphertext<Element> ModReduceRational(RationalCiphertext<Element> ciphertext) const { double start = 0; if( doTiming ) start = currentDateTime(); shared_ptr<Ciphertext<Element>> n = GetEncryptionAlgorithm()->ModReduce(ciphertext.GetNumerator()); shared_ptr<Ciphertext<Element>> d = GetEncryptionAlgorithm()->ModReduce(ciphertext.GetDenominator()); if( doTiming ) { timeSamples->push_back( TimingInfo(OpModReduce, currentDateTime() - start) ); } return RationalCiphertext<Element>(n,d); } /** * ModReduce - PALISADE ModReduce method * @param ciphertext - vector of ciphertext * @return vector of mod reduced ciphertext */ shared_ptr<Matrix<RationalCiphertext<Element>>> ModReduceMatrix(shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext) const { // needs context check double start = 0; if( doTiming ) start = currentDateTime(); shared_ptr<Matrix<RationalCiphertext<Element>>> m( new Matrix<RationalCiphertext<Element>>(ciphertext->GetAllocator(), ciphertext->GetRows(), ciphertext->GetCols())); for( size_t r = 0; r < m->GetRows(); r++ ) for( size_t c = 0; c < m->GetCols(); c++ ) (*m)(r,c) = ModReduceRational((*ciphertext)(r,c)); if( doTiming ) { timeSamples->push_back( 
TimingInfo(OpModReduceMatrix, currentDateTime() - start) ); } return m; } /** * LevelReduce - PALISADE LevelReduce method * @param cipherText1 * @param linearKeySwitchHint * @return vector of level reduced ciphertext */ shared_ptr<Ciphertext<Element>> LevelReduce(const shared_ptr<Ciphertext<Element>> cipherText1, const shared_ptr<LPEvalKeyNTRU<Element>> linearKeySwitchHint) const { if( cipherText1 == NULL || linearKeySwitchHint == NULL || cipherText1->GetCryptoContext() != this || linearKeySwitchHint->GetCryptoContext() != this) { throw std::logic_error("Information passed to LevelReduce was not generated with this crypto context"); } double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->LevelReduce(cipherText1, linearKeySwitchHint); if( doTiming ) { timeSamples->push_back( TimingInfo(OpLevelReduce, currentDateTime() - start) ); } return rv; } /** * RingReduce - PALISADE RingReduce method * @param ciphertext - vector of ciphertext * @param keySwitchHint - the keySwitchHint from original private key to sparse private key * @return vector of ring-reduced ciphertexts */ std::vector<shared_ptr<Ciphertext<Element>>> RingReduce( std::vector<shared_ptr<Ciphertext<Element>>> ciphertext, const shared_ptr<LPEvalKey<Element>> keySwitchHint) const { if( keySwitchHint == NULL || keySwitchHint->GetCryptoContext() != this ) throw std::logic_error("Key passed to RingReduce was not generated with this crypto context"); std::vector<shared_ptr<Ciphertext<Element>>> newCiphertext(ciphertext.size()); double start = 0; if( doTiming ) start = currentDateTime(); for (size_t i = 0; i < ciphertext.size(); i++) { if( ciphertext[i] == NULL || ciphertext[i]->GetCryptoContext() != this ) throw std::logic_error("Ciphertext passed to RingReduce was not generated with this crypto context"); newCiphertext[i] = GetEncryptionAlgorithm()->RingReduce(ciphertext[i], keySwitchHint); } if( doTiming ) { timeSamples->push_back( TimingInfo(OpRingReduce, currentDateTime() - start) ); } return newCiphertext; } /** * ComposedEvalMult - PALISADE composed evalmult * @param ciphertext1 - vector for first cipher text * @param ciphertext2 - vector for second cipher text * @param quadKeySwitchHint - is the quadratic key switch hint from original private key to the quadratic key * return vector of resulting ciphertext */ shared_ptr<Ciphertext<Element>> ComposedEvalMult( const shared_ptr<Ciphertext<Element>> ciphertext1, const shared_ptr<Ciphertext<Element>> ciphertext2) const { if( ciphertext1 == NULL || ciphertext2 == NULL || ciphertext1->GetCryptoContext() != this || ciphertext2->GetCryptoContext() != this ) throw std::logic_error("Ciphertexts passed to ComposedEvalMult was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->ComposedEvalMult(ciphertext1, ciphertext2, GetEvalMultKey()); if( doTiming ) { timeSamples->push_back( TimingInfo(OpComposedEvalMult, currentDateTime() - start) ); } return rv; } /** * Deserialize into a Public Key * @param serObj * @return deserialized object */ shared_ptr<LPPublicKey<Element>> deserializePublicKey(const Serialized& serObj); /** * Deserialize into a Private Key * @param serObj * @return deserialized object */ shared_ptr<LPPrivateKey<Element>> deserializeSecretKey(const Serialized& serObj); /** * Deserialize into a Ciphertext * @param serObj * @return deserialized object */ shared_ptr<Ciphertext<Element>> deserializeCiphertext(const Serialized& serObj); /** * Deserialize into 
an Eval Key * @param serObj * @return deserialized object */ shared_ptr<LPEvalKey<Element>> deserializeEvalKey(const Serialized& serObj); }; /** * @brief CryptoObject * * A class to aid in referring to the crypto context that an object belongs to */ template<typename Element> class CryptoObject { protected: CryptoContext<Element> *context; public: CryptoObject(CryptoContext<Element> *cc = 0) : context(cc) {} virtual ~CryptoObject() {} CryptoContext<Element> *GetCryptoContext() const { return context; } const shared_ptr<LPCryptoParameters<Element>> GetCryptoParameters() const { return context->GetCryptoParameters(); } }; /** * @brief CryptoContextFactory * * A class that contains static methods to generate new crypto contexts from user parameters * */ template<typename Element> class CryptoContextFactory { public: /** * construct a PALISADE CryptoContext for the LTV Scheme * @param plaintextmodulus * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param depth * @return new context */ static shared_ptr<CryptoContext<Element>> genCryptoContextLTV(shared_ptr<typename Element::Params> params, const usint plaintextmodulus, usint relinWindow, float stDev, int depth = 1, int assuranceMeasure = 9, float securityLevel = 1.006); /** * construct a PALISADE CryptoContext for the LTV Scheme * @param encodingParams * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param depth * @return new context */ static shared_ptr<CryptoContext<Element>> genCryptoContextLTV(shared_ptr<typename Element::Params> params, shared_ptr<EncodingParams> encodingParams, usint relinWindow, float stDev, int depth = 1, int assuranceMeasure = 9, float securityLevel = 1.006); /** * construct a PALISADE CryptoContext for the LTV Scheme using the scheme's ParamsGen methods * @param plaintextModulus * @param securityLevel * @param numAdds * @param numMults * @param numKeyswitches * @return new context */ static CryptoContext<Element> genCryptoContextLTV( const usint plaintextModulus, float securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches); /** * construct a PALISADE CryptoContext for the LTV Scheme using the scheme's ParamsGen methods * @param encodingParams * @param securityLevel * @param numAdds * @param numMults * @param numKeyswitches * @return new context */ static CryptoContext<Element> genCryptoContextLTV( shared_ptr<EncodingParams> encodingParams, float securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches); /** * construct a PALISADE CryptoContext for the FV Scheme * @param plaintextmodulus * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param delta * @param mode * @param bigmodulus * @param bigrootofunity * @param depth * @param assuranceMeasure * @param securityLevel * @param bigmodulusarb * @param bigrootofunityarb * @return new context */ static shared_ptr<CryptoContext<Element>> genCryptoContextFV(shared_ptr<typename Element::Params> params, const usint plaintextmodulus, usint relinWindow, float stDev, const std::string& delta, MODE mode = RLWE, const std::string& bigmodulus = "0", const std::string& bigrootofunity = "0", int depth = 0, int assuranceMeasure = 0, float securityLevel = 0, const std::string& bigmodulusarb = "0", const std::string& bigrootofunityarb = "0"); /** * construct a PALISADE CryptoContext for the FV Scheme * @param encodingParams * 
@param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param delta * @param mode * @param bigmodulus * @param bigrootofunity * @param depth * @param assuranceMeasure * @param securityLevel * @param bigmodulusarb * @param bigrootofunityarb * @return new context */ static shared_ptr<CryptoContext<Element>> genCryptoContextFV(shared_ptr<typename Element::Params> params, shared_ptr<EncodingParams> encodingParams, usint relinWindow, float stDev, const std::string& delta, MODE mode = RLWE, const std::string& bigmodulus = "0", const std::string& bigrootofunity = "0", int depth = 0, int assuranceMeasure = 0, float securityLevel = 0, const std::string& bigmodulusarb = "0", const std::string& bigrootofunityarb = "0"); /** * construct a PALISADE CryptoContext for the FV Scheme using the scheme's ParamsGen methods * @param plaintextModulus * @param securityLevel * @param numAdds * @param numMults * @param numKeyswitches * @return new context */ static shared_ptr<CryptoContext<Element>> genCryptoContextFV( const usint plaintextModulus, float securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED); /** * construct a PALISADE CryptoContext for the FV Scheme using the scheme's ParamsGen methods * @param encodingParams * @param securityLevel * @param numAdds * @param numMults * @param numKeyswitches * @return new context */ static shared_ptr<CryptoContext<Element>> genCryptoContextFV( shared_ptr<EncodingParams> encodingParams, float securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED); /** * construct a PALISADE CryptoContext for the BV Scheme * @param plaintextmodulus * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param mode * @return new context */ static shared_ptr<CryptoContext<Element>> genCryptoContextBV(shared_ptr<typename Element::Params> params, const usint plaintextmodulus, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1); /** * construct a PALISADE CryptoContext for the BV Scheme * @param encodingParams * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param mode * @return new context */ static shared_ptr<CryptoContext<Element>> genCryptoContextBV(shared_ptr<typename Element::Params> params, shared_ptr<EncodingParams> encodingParams, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1); /** * construct a PALISADE CryptoContext for the BV Scheme * @param plaintextmodulus * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param mode * @return new context */ static shared_ptr<CryptoContext<Element>> genCryptoContextSHIELD(shared_ptr<typename Element::Params> params, const usint plaintextmodulus, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1); /** * construct a PALISADE CryptoContext for the BV Scheme * @param encodingParams * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param mode * @return new context */ static shared_ptr<CryptoContext<Element>> genCryptoContextSHIELD(shared_ptr<typename Element::Params> params, shared_ptr<EncodingParams> encodingParams, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1); /** * construct a PALISADE CryptoContext for the StehleSteinfeld Scheme * @param plaintextmodulus * @param ringdim * @param modulus * @param 
rootOfUnity * @param relinWindow * @param stDev * @param stDevStSt * @return new context */ static shared_ptr<CryptoContext<Element>> genCryptoContextStehleSteinfeld(shared_ptr<typename Element::Params> params, const usint plaintextmodulus, usint relinWindow, float stDev, float stDevStSt, int depth = 1, int assuranceMeasure = 9, float securityLevel = 1.006); /** * construct a PALISADE CryptoContext for the StehleSteinfeld Scheme * @param encodingParams * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param stDevStSt * @return new context */ static shared_ptr<CryptoContext<Element>> genCryptoContextStehleSteinfeld(shared_ptr<typename Element::Params> params, shared_ptr<EncodingParams> encodingParams, usint relinWindow, float stDev, float stDevStSt, int depth = 1, int assuranceMeasure = 9, float securityLevel = 1.006); /** * construct a PALISADE CryptoContext for the Null Scheme * @param modulus * @return */ static shared_ptr<CryptoContext<Element>> genCryptoContextNull(shared_ptr<typename Element::Params> ep, const usint ptModulus); /** * construct a PALISADE CryptoContext for the Null Scheme * @param modulus * @return */ static shared_ptr<CryptoContext<Element>> genCryptoContextNull(shared_ptr<typename Element::Params> ep, shared_ptr<EncodingParams> encodingParams); /** * Create a PALISADE CryptoContext from a serialization * @param serObj * @return new context */ static shared_ptr<CryptoContext<Element>> DeserializeAndCreateContext(const Serialized& serObj, bool noKeys = false); }; } #endif /* SRC_DEMO_PRE_CRYPTOCONTEXT_H_ */
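// ----------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the PALISADE header above): how the
// homomorphic-evaluation calls declared in CryptoContext compose. It only uses
// methods whose signatures appear above (EvalAdd, EvalMult, Decrypt); the element
// type, the caller-supplied context/keys/ciphertexts, and the assumption that the
// library's namespace is in scope (e.g. via a using-directive) are not taken from
// the header and may need adjusting.
// ----------------------------------------------------------------------------

template<typename Element>
DecryptResult evalAndDecryptSketch(
	const CryptoContext<Element>& cc,
	const shared_ptr<LPPrivateKey<Element>> sk,
	const shared_ptr<Ciphertext<Element>> ctA,
	const shared_ptr<Ciphertext<Element>> ctB,
	Plaintext* result)
{
	// (ctA + ctB) * ctB, evaluated homomorphically; EvalMult fetches the
	// relinearization key from the context via GetEvalMultKey()
	auto sum  = cc.EvalAdd(ctA, ctB);
	auto prod = cc.EvalMult(sum, ctB);

	// Decrypt consumes a vector of ciphertext chunks; a single chunk here,
	// with doPadding = false because no byte padding was applied on encryption
	std::vector<shared_ptr<Ciphertext<Element>>> chunks;
	chunks.push_back(prod);
	return cc.Decrypt(sk, chunks, result, false);
}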
GB_unop__identity_fp32_int64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__identity_fp32_int64 // op(A') function: GB_unop_tran__identity_fp32_int64 // C type: float // A type: int64_t // cast: float cij = (float) aij // unaryop: cij = aij #define GB_ATYPE \ int64_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ float z = (float) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = (float) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__identity_fp32_int64 ( float *Cx, // Cx and Ax may be aliased const int64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t aij = Ax [p] ; float z = (float) aij ; Cx [p] = z ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_fp32_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
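//------------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the generated file above): the inner
// loop of GB_unop_apply__identity_fp32_int64 with the GB_GETA / GB_CAST / GB_OP
// macros expanded by hand, so the generated kernel can be read without the
// GraphBLAS build environment.  The array contents and size are made up for the
// example; only the loop body is taken from the code above.
//------------------------------------------------------------------------------

#include <stdint.h>
#include <stdio.h>

int main (void)
{
    int64_t Ax [4] = { 1, -2, 3, -4 } ;     // A type: int64_t
    float   Cx [4] ;                        // C type: float
    int64_t anz = 4 ;

    // same body as the OpenMP loop in GB_unop_apply__identity_fp32_int64,
    // minus the "#pragma omp parallel for", since the point is the typecast
    for (int64_t p = 0 ; p < anz ; p++)
    {
        int64_t aij = Ax [p] ;              // GB_GETA: aij = Ax [pA]
        float z = (float) aij ;             // GB_CAST: typecast int64_t -> float
        Cx [p] = z ;                        // GB_OP is the identity, so Cx [p] = z
    }

    for (int64_t p = 0 ; p < anz ; p++) printf ("%g\n", Cx [p]) ;
    return (0) ;
}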
GB_is_diagonal.c
//------------------------------------------------------------------------------ // GB_is_diagonal: check if A is a diagonal matrix //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Returns true if A is a square diagonal matrix, with all diagonal entries // present. All pending tuples are ignored. Zombies are treated as entries. #include "GB_mxm.h" #include "GB_atomics.h" bool GB_is_diagonal // true if A is diagonal ( const GrB_Matrix A, // input matrix to examine GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (A != NULL) ; ASSERT_MATRIX_OK (A, "A check diag", GB0) ; ASSERT (!GB_ZOMBIES (A)) ; ASSERT (GB_JUMBLED_OK (A)) ; ASSERT (!GB_PENDING (A)) ; //-------------------------------------------------------------------------- // trivial cases //-------------------------------------------------------------------------- int64_t n = GB_NROWS (A) ; int64_t ncols = GB_NCOLS (A) ; if (n != ncols) { // A is rectangular return (false) ; } if (GB_IS_BITMAP (A)) { // never treat bitmaps as diagonal return (false) ; } if (GB_IS_FULL (A)) { // A is full, and is diagonal only if 1-by-1, but always return // false so that GB_AxB_rowscale and GB_AxB_colscale are not used // by GB_reduce_to_vector. return (false) ; } int64_t anz = GB_NNZ (A) ; int64_t nvec = A->nvec ; if (n != anz || n != nvec) { // A must have exactly n entries in n vectors. A can be sparse or // hypersparse. If hypersparse, all vectors must be present, so // Ap has size n+1 whether sparse or hypersparse. return (false) ; } //-------------------------------------------------------------------------- // determine the number of threads to use //-------------------------------------------------------------------------- // Break the work into lots of tasks so the early-exit can be exploited. GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (n, chunk, nthreads_max) ; int ntasks = (nthreads == 1) ? 
1 : (256 * nthreads) ; ntasks = GB_IMIN (ntasks, n) ; ntasks = GB_IMAX (ntasks, 1) ; //-------------------------------------------------------------------------- // examine each vector of A //-------------------------------------------------------------------------- const int64_t *restrict Ap = A->p ; const int64_t *restrict Ai = A->i ; int diagonal = true ; int tid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { //---------------------------------------------------------------------- // check for early exit //---------------------------------------------------------------------- int diag = true ; { GB_ATOMIC_READ diag = diagonal ; } if (!diag) continue ; //---------------------------------------------------------------------- // check if vectors jstart:jend-1 are diagonal //---------------------------------------------------------------------- int64_t jstart, jend ; GB_PARTITION (jstart, jend, n, tid, ntasks) ; for (int64_t j = jstart ; diag && j < jend ; j++) { int64_t p = Ap [j] ; int64_t ajnz = Ap [j+1] - p ; if (ajnz != 1) { // A(:,j) must have exactly one entry diag = false ; } int64_t i = Ai [p] ; if (i != j) { // the single entry must be A(i,i) diag = false ; } } //---------------------------------------------------------------------- // early exit: tell all other tasks to halt //---------------------------------------------------------------------- if (!diag) { GB_ATOMIC_WRITE diagonal = false ; } } //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- return ((bool) diagonal) ; }
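//------------------------------------------------------------------------------
// Editor's illustrative sketch (not part of GB_is_diagonal.c above): the same
// "n entries, one entry per vector, each entry on the diagonal" test, written
// against plain CSC-style Ap/Ai arrays and without the GraphBLAS task splitting
// and atomic early-exit flag, as a reference for what the parallel loop above
// computes.  The matrix used in main() is made up for the example.
//------------------------------------------------------------------------------

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_diagonal_csc (const int64_t *Ap, const int64_t *Ai, int64_t n)
{
    // a diagonal n-by-n matrix has exactly n entries, one per column
    if (Ap [n] != n) return (false) ;
    for (int64_t j = 0 ; j < n ; j++)
    {
        if (Ap [j+1] - Ap [j] != 1) return (false) ;    // one entry in A(:,j)
        if (Ai [Ap [j]] != j) return (false) ;          // and it must be A(j,j)
    }
    return (true) ;
}

int main (void)
{
    // 3-by-3 identity matrix in CSC form: Ap has n+1 entries, Ai has n entries
    int64_t Ap [4] = { 0, 1, 2, 3 } ;
    int64_t Ai [3] = { 0, 1, 2 } ;
    printf ("diagonal: %d\n", (int) is_diagonal_csc (Ap, Ai, 3)) ;
    return (0) ;
}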
iRCCE_synch.c
///************************************************************************************* // Synchronization functions. // Single-bit and whole-cache-line flags are sufficiently different that we provide // separate implementations of the synchronization routines for each case //************************************************************************************** // // Author: Rob F. Van der Wijngaart // Intel Corporation // Date: 008/30/2010 // //************************************************************************************** // // Copyright 2010 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // [2010-10-25] added support for non-blocking send/recv operations // - iRCCE_isend(), ..._test(), ..._wait(), ..._push() // - iRCCE_irecv(), ..._test(), ..._wait(), ..._push() // by Carsten Clauss, Chair for Operating Systems, // RWTH Aachen University // // [2010-11-12] extracted non-blocking code into separate library // by Carsten Scholtes // // [2011-01-21] updated the datatype of RCCE_FLAG according to the // recent version of RCCE // // [2011-04-12] added marco test for rcce version // // [2012-11-06] add barrier implementation as described in: // USENIX HotPar'12 Eval. Hardw. Synch. Supp. SCC // by Pablo Reble // #include "iRCCE_lib.h" #ifdef SINGLEBITFLAGS #warning iRCCE_TAGGED_FLAGS: for using this feature, SINGLEBITFLAGS must be disabled! 
(make SINGLEBITFLAGS=0) #endif #ifdef SINGLEBITFLAGS int iRCCE_test_flag(RCCE_FLAG flag, RCCE_FLAG_STATUS val, int *result) { t_vcharp cflag; #ifdef RCCE_VERSION // this is a newer version than V1.0.13 t_vcharp flaga; #endif cflag = flag.line_address; #ifdef RCCE_VERSION // this is a newer version than V1.0.13 flaga = flag.flag_addr; #endif // always flush/invalidate to ensure we read the most recent value of *flag // keep reading it until it has the required value #ifdef _OPENMP #pragma omp flush #endif RC_cache_invalidate(); #ifdef RCCE_VERSION // this is a newer version than V1.0.13 if(RCCE_bit_value(flaga, (flag.location)%RCCE_FLAGS_PER_BYTE) != val) { #else if(RCCE_bit_value(cflag, flag.location) != val) { #endif (*result) = 0; } else { (*result) = 1; } return(iRCCE_SUCCESS); } #else ////////////////////////////////////////////////////////////////// // LOCKLESS SYNCHRONIZATION USING ONE WHOLE CACHE LINE PER FLAG // ////////////////////////////////////////////////////////////////// int iRCCE_test_flag(RCCE_FLAG flag, RCCE_FLAG_STATUS val, int *result) { #ifndef RCCE_VERSION RCCE_FLAG flag_pos = flag; #endif #ifdef _OPENMP #pragma omp flush #endif RC_cache_invalidate(); #ifdef RCCE_VERSION if((RCCE_FLAG_STATUS)(*flag.flag_addr) != val) { #else if((*flag_pos) != val) { #endif (*result) = 0; } else { (*result) = 1; } return(iRCCE_SUCCESS); } ////////////////////////////////////////////////////////////////////////// // FUNCTIONS FOR HANDLING TAGGED FLAGS (NEED WHOLE CACHE LINE PER FLAG) // ////////////////////////////////////////////////////////////////////////// int iRCCE_flag_alloc_tagged(RCCE_FLAG *flag) { #ifdef RCCE_VERSION // this is a newer version than V1.0.13 flag->flag_addr = RCCE_malloc(RCCE_LINE_SIZE); if (!(flag->flag_addr)) return(RCCE_error_return(RCCE_debug_synch,RCCE_ERROR_FLAG_UNDEFINED)); return(RCCE_SUCCESS); #else return RCCE_flag_alloc(flag); #endif } int iRCCE_flag_write_tagged(RCCE_FLAG *flag, RCCE_FLAG_STATUS val, int ID, void *tag, int len) { unsigned int val_array[RCCE_LINE_SIZE / sizeof(int)] = {[0 ... 
RCCE_LINE_SIZE/sizeof(int)-1] = 0}; int error, i, j; *val_array = val; #ifdef _OPENMP val_array[RCCE_LINE_SIZE/sizeof(int)-1] = val; #endif if(tag) { if(len > iRCCE_MAX_TAGGED_LEN) len = iRCCE_MAX_TAGGED_LEN; iRCCE_memcpy_put(&val_array[sizeof(int)], tag, len); } #ifdef RCCE_VERSION error = iRCCE_put(flag->flag_addr, (t_vcharp)val_array, RCCE_LINE_SIZE, ID); #else error = iRCCE_put((t_vcharp)(*flag), (t_vcharp)val_array, RCCE_LINE_SIZE, ID); #endif return(RCCE_error_return(RCCE_debug_synch,error)); } int iRCCE_flag_read_tagged(RCCE_FLAG flag, RCCE_FLAG_STATUS *val, int ID, void *tag, int len) { int val_array[RCCE_LINE_SIZE / sizeof(int)]; int error, i, j; #ifdef RCCE_VERSION if((error=iRCCE_get((t_vcharp)val_array, flag.flag_addr, RCCE_LINE_SIZE, ID))) return(RCCE_error_return(RCCE_debug_synch,error)); #else if((error=iRCCE_get((t_vcharp)val_array, (t_vcharp)flag, RCCE_LINE_SIZE, ID))) return(RCCE_error_return(RCCE_debug_synch,error)); #endif if(val) *val = *val_array; #ifdef _OPENMP if(val) *val = val_array[RCCE_LINE_SIZE / sizeof(int) - 1]; #endif if( (val) && (*val) && (tag) ) { if(len > iRCCE_MAX_TAGGED_LEN) len = iRCCE_MAX_TAGGED_LEN; iRCCE_memcpy_put(tag, &val_array[1], len); } return(RCCE_SUCCESS); } int iRCCE_wait_tagged(RCCE_FLAG flag, RCCE_FLAG_STATUS val, void *tag, int len) { int i, j; #ifndef RCCE_VERSION RCCE_FLAG flag_pos = flag; #ifdef _OPENMP flag_pos = flag + RCCE_LINE_SIZE / sizeof(int) - 1; #endif #endif do { #ifdef _OPENMP #pragma omp flush #endif RC_cache_invalidate(); #ifdef RCCE_VERSION // this is a newer version than V1.0.13 #ifdef _OPENMP } while ((RCCE_FLAG_STATUS)(*( ((int*)flag.flag_addr) + RCCE_LINE_SIZE / sizeof(int) - 1)) != val); #else } while ((RCCE_FLAG_STATUS)(*flag.flag_addr) != val); #endif #else } while ((*flag_pos) != val); #endif if(tag) { if(len > iRCCE_MAX_TAGGED_LEN) len = iRCCE_MAX_TAGGED_LEN; #ifdef RCCE_VERSION iRCCE_memcpy_put(tag, &((char*)flag.flag_addr)[sizeof(int)], len); #else iRCCE_memcpy_put(tag, &((char*)flag)[sizeof(int)], len); #endif } return(RCCE_SUCCESS); } int iRCCE_test_tagged(RCCE_FLAG flag, RCCE_FLAG_STATUS val, int *result, void *tag, int len) { int i, j; #ifndef RCCE_VERSION RCCE_FLAG flag_pos = flag; #ifdef _OPENMP flag_pos = flag + RCCE_LINE_SIZE / sizeof(int) - 1; #endif #endif #ifdef _OPENMP #pragma omp flush #endif RC_cache_invalidate(); #ifdef RCCE_VERSION if((RCCE_FLAG_STATUS)(*flag.flag_addr) != val) { #else if((*flag_pos) != val) { #endif (*result) = 0; } else { (*result) = 1; } if((*result) && tag) { if(len > iRCCE_MAX_TAGGED_LEN) len = iRCCE_MAX_TAGGED_LEN; #ifdef RCCE_VERSION iRCCE_memcpy_put(tag, &((char*)flag.flag_addr)[sizeof(int)], len); #else iRCCE_memcpy_put(tag, &((char*)flag)[sizeof(int)], len); #endif } return(RCCE_SUCCESS); } int iRCCE_get_max_tagged_len(void) { return iRCCE_MAX_TAGGED_LEN; } #endif
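//------------------------------------------------------------------------------
// Editor's usage sketch (not part of iRCCE_synch.c above): busy-polling a
// whole-cache-line flag with iRCCE_test_flag() so that other work can be
// overlapped, instead of blocking until the flag changes.  The flag setup
// (RCCE_flag_alloc / RCCE_flag_write on the signalling core) and the flag value
// RCCE_FLAG_SET are assumed from the underlying RCCE API and are not defined in
// this file.
//------------------------------------------------------------------------------

#include "iRCCE_lib.h"

// poll *flag until it has been set by the remote core, without blocking
static void iRCCE_poll_flag_sketch(RCCE_FLAG *flag)
{
   int ready = 0;
   while (!ready) {
      iRCCE_test_flag(*flag, RCCE_FLAG_SET, &ready);   // non-blocking test
      if (!ready) {
         // overlap: progress pending non-blocking transfers here, e.g. the
         // iRCCE_isend()/iRCCE_irecv() push routines mentioned in the header
      }
   }
}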
network.h
// == mojo ==================================================================== // // Copyright (c) gnawice@gnawice.com. All rights reserved. // See LICENSE in root folder // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files(the "Software"), // to deal in the Software without restriction, including without // limitation the rights to use, copy, modify, merge, publish, distribute, // sublicense, and/or sell copies of the Software, and to permit persons to // whom the Software is furnished to do so, subject to the following // conditions : // // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. // IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT // OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR // THE USE OR OTHER DEALINGS IN THE SOFTWARE. // // ============================================================================ // network.h: The main artificial neural network graph for mojo // ==================================================================== mojo == #pragma once #include <string> #include <iostream> // cout #include <fstream> #include <sstream> #include <map> #include <vector> #include "layer.h" #include "solver.h" #include "activation.h" #include "cost.h" // hack for VS2010 to handle c++11 for(:) #if (_MSC_VER == 1600) #ifndef __for__ #define __for__ for each #define __in__ in #endif #else #ifndef __for__ #define __for__ for #define __in__ : #endif #endif #if defined(MOJO_CV2) || defined(MOJO_CV3) #ifdef MOJO_CV2 #include "opencv2/opencv.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include "opencv2/contrib/contrib.hpp" #pragma comment(lib, "opencv_core249") #pragma comment(lib, "opencv_highgui249") #pragma comment(lib, "opencv_imgproc249") #pragma comment(lib, "opencv_contrib249") #else //#ifdef MOJO_CV3 #include "opencv2/opencv.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #pragma comment(lib, "opencv_world310") #endif #endif namespace mojo { #if defined(MOJO_CV2) || defined(MOJO_CV3) // forward declare these for data augmentation cv::Mat matrix2cv(const mojo::matrix &m, bool uc8 = false); mojo::matrix cv2matrix(cv::Mat &m); mojo::matrix transform(const mojo::matrix in, const int x_center, const int y_center, int out_dim, float theta = 0, float scale = 1.f); #endif // sleep needed for threading #ifdef _WIN32 #include <windows.h> void mojo_sleep(unsigned milliseconds) { Sleep(milliseconds); } #else #include <unistd.h> void mojo_sleep(unsigned milliseconds) { usleep(milliseconds * 1000); } #endif #ifdef MOJO_PROFILE_LAYERS #ifdef _WIN32 //* used for profiling layers double PCFreq = 0.0; __int64 CounterStart = 0; void StartCounter() { LARGE_INTEGER li; if (!QueryPerformanceFrequency(&li)) return; PCFreq = double(li.QuadPart) / 1000.0; QueryPerformanceCounter(&li); CounterStart = li.QuadPart; } double GetCounter() { LARGE_INTEGER li; QueryPerformanceCounter(&li); return double(li.QuadPart - CounterStart) / PCFreq; } #else void StartCounter(){} double GetCounter(){return 0;} #endif #endif //*/ void 
replace_str(std::string& str, const std::string& from, const std::string& to) { if (from.empty()) return; size_t start_pos = 0; while ((start_pos = str.find(from, start_pos)) != std::string::npos) { str.replace(start_pos, from.length(), to); start_pos += to.length(); // In case 'to' contains 'from', like replacing 'x' with 'yx' } } // returns Energy (euclidian distance / 2) and max index float match_labels(const float *out, const float *target, const int size, int *best_index = NULL) { float E = 0; int max_j = 0; for (int j = 0; j<size; j++) { E += (out[j] - target[j])*(out[j] - target[j]); if (out[max_j]<out[j]) max_j = j; } if (best_index) *best_index = max_j; E *= 0.5; return E; } // returns index of highest value (argmax) int arg_max(const float *out, const int size) { int max_j = 0; for (int j = 0; j<size; j++) if (out[max_j]<out[j]) {max_j = j; }//std::cout <<j<<",";} return max_j; } //---------------------------------------------------------------------- // network // - class that holds all the layers and connection information // - runs forward prediction class network { int _size; // output size int _thread_count; // determines number of layer sets (copys of layers) int _internal_thread_count; // used for speeding up convolutions, etc.. static const int MAIN_LAYER_SET = 0; // training related stuff int _batch_size; // determines number of dW sets float _skip_energy_level; bool _smart_train; std::vector <float> _running_E; double _running_sum_E; cost_function *_cost_function; solver *_solver; static const unsigned char BATCH_RESERVED = 1, BATCH_FREE = 0, BATCH_COMPLETE = 2; static const int BATCH_FILLED_COMPLETE = -2, BATCH_FILLED_IN_PROCESS = -1; #ifdef MOJO_OMP omp_lock_t _lock_batch; void lock_batch() {omp_set_lock(&_lock_batch);} void unlock_batch() {omp_unset_lock(&_lock_batch);} void init_lock() {omp_init_lock(&_lock_batch);} void destroy_lock() {omp_destroy_lock(&_lock_batch);} int get_thread_num() {return omp_get_thread_num();} #else void lock_batch() {} void unlock_batch() {} void init_lock(){} void destroy_lock() {} int get_thread_num() {return 0;} #endif public: // training progress stuff int train_correct; int train_skipped; int stuck_counter; int train_updates; int train_samples; int epoch_count; int max_epochs; float best_estimated_accuracy; int best_accuracy_count; float old_estimated_accuracy; float estimated_accuracy; // data augmentation stuff int use_augmentation; // 0=off, 1=mojo, 2=opencv int augment_x, augment_y; int augment_h_flip, augment_v_flip; mojo::pad_type augment_pad; float augment_theta; float augment_scale; // here we have multiple sets of the layers to allow threading and batch processing // a separate layer set is needed for each independent thread std::vector< std::vector<base_layer *>> layer_sets; std::map<std::string, int> layer_map; // name-to-index of layer for layer management std::vector<std::pair<std::string, std::string>> layer_graph; // pairs of names of layers that are connected std::vector<matrix *> W; // these are the weights between/connecting layers // these sets are needed because we need copies for each item in mini-batch std::vector< std::vector<matrix>> dW_sets; // only for training, will have _batch_size of these std::vector< std::vector<matrix>> dbias_sets; // only for training, will have _batch_size of these std::vector< unsigned char > batch_open; // only for training, will have _batch_size of these network(const char* opt_name=NULL): _thread_count(1), _skip_energy_level(0.f), _batch_size(1) { _internal_thread_count=1; 
        _size = 0;
        _solver = new_solver(opt_name);
        _cost_function = NULL;
        //std::vector<base_layer *> layer_set;
        //layer_sets.push_back(layer_set);
        layer_sets.resize(1);
        dW_sets.resize(_batch_size);
        dbias_sets.resize(_batch_size);
        batch_open.resize(_batch_size);
        _running_sum_E = 0.;
        train_correct = 0; train_samples = 0; train_skipped = 0;
        epoch_count = 0; max_epochs = 1000; train_updates = 0;
        estimated_accuracy = 0; old_estimated_accuracy = 0;
        stuck_counter = 0; best_estimated_accuracy = 0; best_accuracy_count = 0;
        use_augmentation = 0;
        augment_x = 0; augment_y = 0;
        augment_h_flip = 0; augment_v_flip = 0;
        augment_pad = mojo::edge;
        augment_theta = 0; augment_scale = 0;
        init_lock();
#ifdef USE_AF
        af::setDevice(0);
        af::info();
#endif
    }

    ~network()
    {
        clear();
        if (_cost_function) delete _cost_function;
        if (_solver) delete _solver;
        destroy_lock();
    }

    // call clear if you want to load a different configuration/model
    void clear()
    {
        // free every layer in every layer set before dropping the containers
        for (int i = 0; i < (int)layer_sets.size(); i++)
            __for__(auto l __in__ layer_sets[i]) delete l;
        layer_sets.clear();
        __for__(auto w __in__ W) if (w) delete w;
        W.clear();
        layer_map.clear();
        layer_graph.clear();
    }

    // output size of final layer;
    int out_size() { return _size; }

    // get input size
    bool get_input_size(int *w, int *h, int *c)
    {
        if (layer_sets[MAIN_LAYER_SET].size() < 1) return false;
        *w = layer_sets[MAIN_LAYER_SET][0]->node.cols;
        *h = layer_sets[MAIN_LAYER_SET][0]->node.rows;
        *c = layer_sets[MAIN_LAYER_SET][0]->node.chans;
        return true;
    }

    // sets up number of layer copies to run over multiple threads
    void build_layer_sets()
    {
        int layer_cnt = (int)layer_sets.size();
        if (layer_cnt < _thread_count) layer_sets.resize(_thread_count);
        // ToDo: add shrink back / else if(layer_cnt>_thread_count)
        sync_layer_sets();
    }

    inline int get_thread_count() { return _thread_count; }

    // must call this with max thread count before constructing layers
    // value <1 will result in thread count = # cores (including hyperthreaded)
    void enable_external_threads(int threads = -1)
    {
#ifdef MOJO_OMP
        if (threads < 1) threads = omp_get_num_procs();
        _thread_count = threads;
        if (_internal_thread_count <= _thread_count) omp_set_num_threads(_thread_count);
        omp_set_nested(1);
#else
        if (threads < 1) _thread_count = 1;
        else _thread_count = threads;
        if (threads > 1) bail("must define MOJO_OMP to use threading");
#endif
        build_layer_sets();
    }

    void enable_internal_threads(int threads = -1)
    {
#ifdef MOJO_OMP
        if (threads < 1) { threads = omp_get_num_procs(); threads = threads - 1; } // one less than core count
        if (threads < 1) _internal_thread_count = 1;
        else _internal_thread_count = threads;
        omp_set_nested(1);
#else
        _internal_thread_count = 1;
#endif
    }

    // when using threads, need to get bias data synched between all layer sets,
    // call this after bias update in main layer set to copy the bias to the other sets
    void sync_layer_sets()
    {
        for (int i = 1; i < (int)layer_sets.size(); i++)
            for (int j = 0; j < (int)layer_sets[MAIN_LAYER_SET].size(); j++)
                for (int k = 0; k < layer_sets[MAIN_LAYER_SET][j]->bias.size(); k++)
                    (layer_sets[i])[j]->bias.x[k] = (layer_sets[MAIN_LAYER_SET])[j]->bias.x[k];
    }

    // used to add some noise to weights
    void heat_weights()
    {
        __for__(auto w __in__ W)
        {
            if (!w) continue;
            matrix noise(w->cols, w->rows, w->chans);
            noise.fill_random_normal(1.f / noise.size());
            //noise *= *w;
            *w += noise;
        }
    }

    // used to subtract the mean from each weight matrix
    void remove_means()
    {
        __for__(auto w __in__ W) if (w) w->remove_mean();
    }

    // used to push a layer back in the ORDERED list of layers
    // if connect_all() is used, then the order of the push_back is used to connect the
layers // when forward or backward propogation, this order is used for the serialized order of calculations // Layer_name must be unique. bool push_back(const char *layer_name, const char *layer_config) { if(layer_map[layer_name]) return false; //already exists base_layer *l=new_layer(layer_name, layer_config); // set map to index // make sure there is a 'set' to add layers to if(layer_sets.size()<1) { std::vector<base_layer *> layer_set; layer_sets.push_back(layer_set); } // make sure layer_sets are created build_layer_sets(); layer_map[layer_name] = (int)layer_sets[MAIN_LAYER_SET].size(); layer_sets[MAIN_LAYER_SET].push_back(l); // upadate as potential last layer - so it sets the out size _size=l->fan_size(); // add other copies needed for threading for(int i=1; i<(int)layer_sets.size();i++) layer_sets[i].push_back(new_layer(layer_name, layer_config)); return true; } // connect 2 layers together and initialize weights // top and bottom concepts are reversed from literature // my 'top' is the input of a forward() pass and the 'bottom' is the output // perhaps 'top' traditionally comes from the brain model, but my 'top' comes // from reading order (information flows top to bottom) void connect(const char *layer_name_top, const char *layer_name_bottom) { size_t i_top=layer_map[layer_name_top]; size_t i_bottom=layer_map[layer_name_bottom]; base_layer *l_top= layer_sets[MAIN_LAYER_SET][i_top]; base_layer *l_bottom= layer_sets[MAIN_LAYER_SET][i_bottom]; int w_i=(int)W.size(); matrix *w = l_bottom->new_connection(*l_top, w_i); W.push_back(w); layer_graph.push_back(std::make_pair(layer_name_top,layer_name_bottom)); // need to build connections for other batches/threads for(int i=1; i<(int)layer_sets.size(); i++) { l_top= layer_sets[i][i_top]; l_bottom= layer_sets[i][i_bottom]; delete l_bottom->new_connection(*l_top, w_i); } // we need to let solver prepare space for stateful information if (_solver) { if (w)_solver->push_back(w->cols, w->rows, w->chans); else _solver->push_back(1, 1, 1); } int fan_in=l_bottom->fan_size(); int fan_out=l_top->fan_size(); // ToDo: this may be broke when 2 layers connect to one. need to fix (i.e. resnet) // after all connections, run through and do weights with correct fan count // initialize weights - ToDo: separate and allow users to configure(?) if (w && l_bottom->has_weights()) { if (strcmp(l_bottom->p_act->name, "tanh") == 0) { // xavier : for tanh float weight_base = (float)(std::sqrt(6. / ((double)fan_in + (double)fan_out))); // float weight_base = (float)(std::sqrt(.25/( (double)fan_in))); w->fill_random_uniform(weight_base); } else if ((strcmp(l_bottom->p_act->name, "sigmoid") == 0) || (strcmp(l_bottom->p_act->name, "sigmoid") == 0)) { // xavier : for sigmoid float weight_base = 4.f*(float)(std::sqrt(6. / ((double)fan_in + (double)fan_out))); w->fill_random_uniform(weight_base); } else if ((strcmp(l_bottom->p_act->name, "lrelu") == 0) || (strcmp(l_bottom->p_act->name, "relu") == 0) || (strcmp(l_bottom->p_act->name, "vlrelu") == 0) || (strcmp(l_bottom->p_act->name, "elu") == 0)) { // he : for relu float weight_base = (float)(std::sqrt(2. / (double)fan_in)); w->fill_random_normal(weight_base); } else { // lecun : orig float weight_base = (float)(std::sqrt(1. 
/ (double)fan_in)); w->fill_random_uniform(weight_base); } } else if (w) w->fill(0); } // automatically connect all layers in the order they were provided // easy way to go, but can't deal with branch/highway/resnet/inception types of architectures void connect_all() { for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size()-1; j++) connect(layer_sets[MAIN_LAYER_SET][j]->name.c_str(), layer_sets[MAIN_LAYER_SET][j+1]->name.c_str()); } int get_layer_index(const char *name) { for (int j = 0; j < (int)layer_sets[MAIN_LAYER_SET].size(); j++) if (layer_sets[MAIN_LAYER_SET][j]->name.compare(name) == 0) return j; return -1; } // get the list of layers used (but not connection information) std::string get_configuration() { std::string str; // print all layer configs for (int j = 0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++) str+= " "+ std::to_string((long long)j) +" : " +layer_sets[MAIN_LAYER_SET][j]->name +" : " + layer_sets[MAIN_LAYER_SET][j]->get_config_string(); str += "\n"; // print layer links if (layer_graph.size() <= 0) return str; for (int j = 0; j < (int)layer_graph.size(); j++) { if (j % 3 == 0) str += " "; if((j % 3 == 1)|| (j % 3 == 2)) str += ", "; str +=layer_graph[j].first + "-" + layer_graph[j].second; if (j % 3 == 2) str += "\n"; } return str; } // performs forward pass and returns class index // do not delete or modify the returned pointer. it is a live pointer to the last layer in the network // if calling over multiple threads, provide the thread index since the interal data is not otherwise thread safe int predict_class(const float *in, int _thread_number = -1) { const float* out = forward(in, _thread_number); // for(int i = 0; i < out_size(); i++) // printf("%d: %f\n", i, out[i]); return arg_max(out, out_size()); } //---------------------------------------------------------------------------------------------------------- // F O R W A R D // // the main forward pass // if calling over multiple threads, provide the thread index since the interal data is not otherwise thread safe // train parameter is used to designate the forward pass is used in training (it turns on dropout layers, etc..) float* forward(const float *in, int _thread_number=-1, int _train=0) { if(_thread_number<0) _thread_number=get_thread_num(); if (_thread_number > _thread_count && _thread_count>0) bail("need to enable threading\n"); if (_thread_number >= (int)layer_sets.size()) bail("need to enable threading\n"); //std::cout << get_thread_num() << ","; // clear nodes to zero & find input layers std::vector<base_layer *> inputs; __for__(auto layer __in__ layer_sets[_thread_number]) { if (dynamic_cast<input_layer*> (layer) != NULL) inputs.push_back(layer); layer->set_threading(_internal_thread_count); layer->node.fill(0.f); } // first layer assumed input. 
copy input to it const float *in_ptr = in; //base_layer * layer = layer_sets[_thread_number][0]; //memcpy(layer->node.x, in, sizeof(float)*layer->node.size()); __for__(auto layer __in__ inputs) { memcpy(layer->node.x, in_ptr, sizeof(float)*layer->node.size()); in_ptr += layer->node.size(); } //for (int i = 0; i < layer->node.size(); i++) // layer_sets[_thread_number][0]->node.x[i] = in[i]; // for all layers __for__(auto layer __in__ layer_sets[_thread_number]) { // add bias and activate these outputs (they should all be summed up from other branches at this point) //for(int j=0; j<layer->node.chans; j+=10) for (int i=0; i<layer->node.cols*layer->node.rows; i+=10) std::cout<< layer->node.x[i+j*layer->node.chan_stride] <<"|"; layer->activate_nodes(); //for(int j=0; j<layer->node.chans; j++) for (int i=0; i<layer->node.cols*layer->node.rows; i+=10) std::cout<< layer->node.x[i+j*layer->node.chan_stride] <<"|"; // send output signal downstream (note in this code 'top' is input layer, 'bottom' is output - bucking tradition __for__ (auto &link __in__ layer->forward_linked_layers) { // instead of having a list of paired connections, just use the shape of W to determine connections // this is harder to read, but requires less look-ups // the 'link' variable is a std::pair created during the connect() call for the layers int connection_index = link.first; base_layer *p_bottom = link.second; // weight distribution of the signal to layers under it #ifdef MOJO_PROFILE_LAYERS StartCounter(); #endif p_bottom->accumulate_signal(*layer, *W[connection_index], _train); //if (p_bottom->has_weights()) //for(int j=0; j<layer->node.chans; j++) //int j=0; for (int i=0; i<layer->node.cols*layer->node.rows; i+=10) std::cout<< layer->node.x[i+j*layer->node.chan_stride] <<"|"; #ifdef MOJO_PROFILE_LAYERS std::cout << p_bottom->name << "\t" << GetCounter() << "ms\n"; #endif } } // return pointer to float * result from last layer /* std::cout << "out:"; for (int i = 0; i < 10; i++) { std::cout << layer_sets[_thread_number][layer_sets[_thread_number].size() - 1]->node.x[i] <<","; } std::cout << "\n"; */ return layer_sets[_thread_number][layer_sets[_thread_number].size()-1]->node.x; } //---------------------------------------------------------------------------------------------------------- // W R I T E // // write parameters to stream/file // note that this does not persist intermediate training information that could be needed to 'pickup where you left off' bool write(std::ofstream& ofs, bool binary = false, bool final = false) { // save layers int layer_cnt = (int)layer_sets[MAIN_LAYER_SET].size(); // int ignore_cnt = 0; // for (int j = 0; j<(int)layer_sets[0].size(); j++) // if (dynamic_cast<dropout_layer*> (layer_sets[0][j]) != NULL) ignore_cnt++; ofs<<"mojo01" << std::endl; ofs<<(int)(layer_cnt)<<std::endl; for(int j=0; j<(int)layer_sets[0].size(); j++) ofs << layer_sets[MAIN_LAYER_SET][j]->name << std::endl << layer_sets[MAIN_LAYER_SET][j]->get_config_string(); // if (dynamic_cast<dropout_layer*> (layer_sets[0][j]) != NULL) // save graph ofs<<(int)layer_graph.size()<<std::endl; for(int j=0; j<(int)layer_graph.size(); j++) ofs<<layer_graph[j].first << std::endl << layer_graph[j].second << std::endl; if(binary) { ofs<<(int)1<<std::endl; // flags that this is binary data // binary version to save space if needed // save bias info for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++) if(layer_sets[MAIN_LAYER_SET][j]->use_bias()) ofs.write((char*)layer_sets[MAIN_LAYER_SET][j]->bias.x, 
layer_sets[MAIN_LAYER_SET][j]->bias.size()*sizeof(float)); // save weights for (int j = 0; j < (int)W.size(); j++) { if (W[j]) ofs.write((char*)W[j]->x, W[j]->size()*sizeof(float)); } } else { ofs<<(int)0<<std::endl; // save bias info for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++) { if (layer_sets[MAIN_LAYER_SET][j]->use_bias()) { for (int k = 0; k < layer_sets[MAIN_LAYER_SET][j]->bias.size(); k++) ofs << layer_sets[MAIN_LAYER_SET][j]->bias.x[k] << " "; ofs << std::endl; } } // save weights for(int j=0; j<(int)W.size(); j++) { if (W[j]) { for (int i = 0; i < W[j]->size(); i++) ofs << W[j]->x[i] << " "; ofs << std::endl; } } } ofs.flush(); return true; } bool write(std::string &filename, bool binary = false, bool final = false) { std::ofstream temp((const char *)filename.c_str(), std::ios::binary); return write(temp, binary, final); }//, std::ofstream::binary); bool write(char *filename, bool binary = false, bool final = false) { std::string str= filename; return write(str, binary, final); } // read network from a file/stream std::string getcleanline(std::istream& ifs) { std::string s; // The characters in the stream are read one-by-one using a std::streambuf. // That is faster than reading them one-by-one using the std::istream. // Code that uses streambuf this way must be guarded by a sentry object. // The sentry object performs various tasks, // such as thread synchronization and updating the stream state. std::istream::sentry se(ifs, true); std::streambuf* sb = ifs.rdbuf(); for (;;) { int c = sb->sbumpc(); switch (c) { case '\n': return s; case '\r': if (sb->sgetc() == '\n') sb->sbumpc(); return s; case EOF: // Also handle the case when the last line has no line ending if (s.empty()) ifs.setstate(std::ios::eofbit); return s; default: s += (char)c; } } } //---------------------------------------------------------------------------------------------------------- // R E A D // bool read(std::istream &ifs) { if(!ifs.good()) return false; std::string s; s = getcleanline(ifs); int layer_count; int version = 0; if (s.compare("mojo01")==0) { s = getcleanline(ifs); layer_count = atoi(s.c_str()); version = 1; } else if (s.find("mojo:") == 0) { version = -1; int cnt = 1; while (!ifs.eof()) { s = getcleanline(ifs); if (s.empty()) continue; if(s[0]=='#') continue; push_back(int2str(cnt).c_str(), s.c_str()); cnt++; } connect_all(); // copies batch=0 stuff to other batches sync_layer_sets(); return true; } else layer_count = atoi(s.c_str()); // read layer def std::string layer_name; std::string layer_def; for (auto i=0; i<layer_count; i++) { layer_name = getcleanline(ifs); layer_def = getcleanline(ifs); push_back(layer_name.c_str(),layer_def.c_str()); } // read graph int graph_count; ifs>>graph_count; getline(ifs,s); // get endline if (graph_count <= 0) { connect_all(); } else { std::string layer_name1; std::string layer_name2; for (auto i=0; i<graph_count; i++) { layer_name1= getcleanline(ifs); layer_name2 = getcleanline(ifs); connect(layer_name1.c_str(),layer_name2.c_str()); } } int binary; s=getcleanline(ifs); // get endline binary = atoi(s.c_str()); // binary version to save space if needed if(binary==1) { for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++) if (layer_sets[MAIN_LAYER_SET][j]->use_bias()) { //int c = layer_sets[MAIN_LAYER_SET][j]->bias.chans; //int cs = layer_sets[MAIN_LAYER_SET][j]->bias.chan_stride; //for (int i = 0; i < layer_sets[MAIN_LAYER_SET][j]->bias.size(); i++) ifs.read((char*)layer_sets[MAIN_LAYER_SET][j]->bias.x, 
layer_sets[MAIN_LAYER_SET][j]->bias.size()*sizeof(float)); } for (int j = 0; j < (int)W.size(); j++) { if (W[j]) { ifs.read((char*)W[j]->x, W[j]->size()*sizeof(float)); // for(int i = 0; i < W[j]->size(); i++) // printf("W[j]->x[%d] = %f\n", i, W[j]->x[i]); } } } else if(binary==0)// text version { // read bias for(int j=0; j<layer_count; j++) { if (layer_sets[MAIN_LAYER_SET][j]->use_bias()) { // int c = layer_sets[MAIN_LAYER_SET][j]->bias.chans; // int cs = layer_sets[MAIN_LAYER_SET][j]->bias.chan_stride; // for (int i = 0; i < c; i++) for (int k = 0; k < layer_sets[MAIN_LAYER_SET][j]->bias.size(); k++) { ifs >> layer_sets[MAIN_LAYER_SET][j]->bias.x[k]; //std::cout << layer_sets[MAIN_LAYER_SET][j]->bias.x[k] << ","; } ifs.ignore();// getline(ifs, s); // get endline } } // read weights for (auto j=0; j<(int)W.size(); j++) { if (W[j]) { for (int i = 0; i < W[j]->size(); i++) ifs >> W[j]->x[i]; ifs.ignore(); //getline(ifs, s); // get endline } } } // copies batch=0 stuff to other batches sync_layer_sets(); return true; } bool read(std::string filename) { std::ifstream fs(filename.c_str(),std::ios::binary); if (fs.is_open()) { bool ret = read(fs); fs.close(); return ret; } else return false; } bool read(const char *filename) { return read(std::string(filename)); } #ifndef MOJO_NO_TRAINING // this is surely broke by now and will need to be fixed // =========================================================================== // training part // =========================================================================== // resets the state of all batches to 'free' state void reset_mini_batch() { memset(batch_open.data(), BATCH_FREE, batch_open.size()); } // sets up number of mini batches (storage for sets of weight deltas) void set_mini_batch_size(int batch_cnt) { if (batch_cnt<1) batch_cnt = 1; _batch_size = batch_cnt; dW_sets.resize(_batch_size); dbias_sets.resize(_batch_size); batch_open.resize(_batch_size); reset_mini_batch(); } int get_mini_batch_size() { return _batch_size; } // return index of next free batch // or returns -2 (BATCH_FILLED_COMPLETE) if no free batches - all complete (need a sync call) // or returns -1 (BATCH_FILLED_IN_PROCESS) if no free batches - some still in progress (must wait to see if one frees) int get_next_open_batch() { int reserved = 0; int filled = 0; for (int i = 0; i<batch_open.size(); i++) { if (batch_open[i] == BATCH_FREE) return i; if (batch_open[i] == BATCH_RESERVED) reserved++; if (batch_open[i] == BATCH_COMPLETE) filled++; } if (reserved>0) return BATCH_FILLED_IN_PROCESS; // all filled but wainting for reserves if (filled == batch_open.size()) return BATCH_FILLED_COMPLETE; // all filled and complete bail("threading error"); // should not get here unless threading problem } //---------------------------------------------------------------------------------------------------------- // s y n c m i n i b a t c h // // apply all weights to first set of dW, then apply to model weights void sync_mini_batch() { // need to ensure no batches in progress (reserved) int next = get_next_open_batch(); if (next == BATCH_FILLED_IN_PROCESS) bail("thread lock"); int layer_cnt = (int)layer_sets[MAIN_LAYER_SET].size(); base_layer *layer; // sum contributions for (int k = layer_cnt - 1; k >= 0; k--) { layer = layer_sets[MAIN_LAYER_SET][k]; __for__(auto &link __in__ layer->backward_linked_layers) { int w_index = (int)link.first; // if batch free, then make sure it is zero'd out because we will increment dW set [0] if (batch_open[0] == BATCH_FREE) dW_sets[0][w_index].fill(0); 
for (int b = 1; b< _batch_size; b++) { if (batch_open[b] == BATCH_COMPLETE) dW_sets[0][w_index] += dW_sets[b][w_index]; } } if (dynamic_cast<convolution_layer*> (layer) != NULL) continue; // bias stuff... that needs to be fixed for conv layers perhaps if (batch_open[0] == BATCH_FREE) dbias_sets[0][k].fill(0); for (int b = 1; b< _batch_size; b++) { if (batch_open[b] == BATCH_COMPLETE) dbias_sets[0][k] += dbias_sets[b][k]; } } // update weights for (int k = layer_cnt - 1; k >= 0; k--) { layer = layer_sets[MAIN_LAYER_SET][k]; __for__(auto &link __in__ layer->backward_linked_layers) { int w_index = (int)link.first; if (dW_sets[MAIN_LAYER_SET][w_index].size() > 0) if(W[w_index]) _solver->increment_w(W[w_index], w_index, dW_sets[MAIN_LAYER_SET][w_index]); // -- 10% } layer->update_bias(dbias_sets[0][k], _solver->learning_rate); } // prepare to start mini batch over reset_mini_batch(); train_updates++; // could have no updates .. so this is not exact sync_layer_sets(); } // reserve_next.. is used to reserve a space in the minibatch for the existing training sample int reserve_next_batch() { lock_batch(); int my_batch_index = -3; while (my_batch_index < 0) { my_batch_index = get_next_open_batch(); if (my_batch_index >= 0) // valid index { batch_open[my_batch_index] = BATCH_RESERVED; unlock_batch(); return my_batch_index; } else if (my_batch_index == BATCH_FILLED_COMPLETE) // all index are complete { sync_mini_batch(); // resets _batch_index to 0 my_batch_index = get_next_open_batch(); batch_open[my_batch_index] = BATCH_RESERVED; unlock_batch(); return my_batch_index; } // need to wait for ones in progress to finish unlock_batch(); mojo_sleep(1); lock_batch(); } return -3; } float get_learning_rate() {if(!_solver) bail("set solver"); return _solver->learning_rate;} void set_learning_rate(float alpha) {if(!_solver) bail("set solver"); _solver->learning_rate=alpha;} void reset_solver() {if(!_solver) bail("set solver"); _solver->reset();} bool get_smart_training() {return _smart_train;} void set_smart_training(bool _use_train) { _smart_train = _use_train;} float get_smart_train_level() { return _skip_energy_level; } void set_smart_train_level(float _level) { _skip_energy_level = _level; } void set_max_epochs(int max_e) { if (max_e <= 0) max_e = 1; max_epochs = max_e; } int get_epoch() { return epoch_count; } // goal here is to update the weights W. 
// use w_new = w_old - alpha dE/dw // E = sum: 1/2*||y-target||^2 // note y = f(x*w) // dE = (target-y)*dy/dw = (target-y)*df/dw = (target-y)*df/dx* dx/dw = (target-y) * df * y_prev // similarly for cross entropy // =========================================================================== // training part // =========================================================================== void set_random_augmentation(int translate_x, int translate_y, int flip_h, int flip_v, mojo::pad_type padding = mojo::edge) { use_augmentation = 1; augment_x = translate_x; augment_y = translate_y; augment_h_flip = flip_h; augment_v_flip = flip_v; augment_pad = padding; augment_theta = 0; augment_scale = 0; } void set_random_augmentation(int translate_x, int translate_y, int flip_h, int flip_v, float rotation_deg, float scale, mojo::pad_type padding = mojo::edge) { use_augmentation = 2; augment_x = translate_x; augment_y = translate_y; augment_h_flip = flip_h; augment_v_flip = flip_v; augment_pad = padding; augment_theta = rotation_deg; augment_scale = scale; } // call before starting training for current epoch void start_epoch(std::string loss_function="mse") { _cost_function=new_cost_function(loss_function); train_correct = 0; train_skipped = 0; train_updates = 0; train_samples = 0; if (epoch_count == 0) reset_solver(); // accuracy not improving .. slow learning if(_smart_train && (best_accuracy_count > 4)) { stuck_counter++; set_learning_rate((0.5f)*get_learning_rate()); if (get_learning_rate() < 0.000001f) { // heat_weights(); set_learning_rate(0.000001f); stuck_counter++;// end of the line.. so speed up end } best_accuracy_count = 0; } old_estimated_accuracy = estimated_accuracy; estimated_accuracy = 0; //_skip_energy_level = 0.05; _running_sum_E = 0; } // time to stop? bool elvis_left_the_building() { // 2 stuck x 4 non best accuracy to quit = 8 times no improvement if ((epoch_count>max_epochs) || (stuck_counter > 3)) return true; else return false; } // call after putting all training samples through this epoch bool end_epoch() { // run leftovers through mini-batch sync_mini_batch(); epoch_count++; // estimate accuracy of validation run estimated_accuracy = 100.f*train_correct / train_samples; if (train_correct > best_estimated_accuracy) { best_estimated_accuracy = (float)train_correct; best_accuracy_count = 0; stuck_counter = 0; } else best_accuracy_count++; return elvis_left_the_building(); } // if smart training was thinking about exiting, calling reset will make it think everything is OK void reset_smart_training() { stuck_counter=0; best_accuracy_count = 0; best_estimated_accuracy = 0; } //---------------------------------------------------------------------------------------------------------- // u p d a t e _ s m a r t _ t r a i n // void update_smart_train(const float E, bool correct) { #ifdef MOJO_OMP #pragma omp critical #endif { train_samples++; if (correct) train_correct++; if (_smart_train) { _running_E.push_back(E); _running_sum_E += E; const int SMART_TRAIN_SAMPLE_SIZE = 1000; int s = (int)_running_E.size(); if (s >= SMART_TRAIN_SAMPLE_SIZE) { _running_sum_E /= (double)s; std::sort(_running_E.begin(), _running_E.end()); float top_fraction = (float)_running_sum_E*10.f; //10. 
const float max_fraction = 0.75f; const float min_fraction = 0.075f;// 0.03f; if (top_fraction > max_fraction) top_fraction = max_fraction; if (top_fraction < min_fraction) top_fraction = min_fraction; int index = s - 1 - (int)(top_fraction*(s - 1)); if (_running_E[index] > 0) _skip_energy_level = _running_E[index]; _running_sum_E = 0; _running_E.clear(); } } if (E > 0 && E < _skip_energy_level) { //std::cout << "E=" << E; train_skipped++; } } // omp critical } // finish back propogation through the hidden layers void backward_hidden(const int my_batch_index, const int thread_number) { const int layer_cnt = (int)layer_sets[thread_number].size(); const int last_layer_index = layer_cnt - 1; base_layer *layer;// = layer_sets[thread_number][last_layer_index]; // update hidden layers // start at lower layer and push information up to previous layer // handle dropout first for (int k = last_layer_index; k >= 0; k--) { layer = layer_sets[thread_number][k]; // all the signals should be summed up to this layer by now, so we go through and take the grad of activiation int nodes = layer->node.size(); // already did last layer, so skip it if (k< last_layer_index) for (int i = 0; i< nodes; i++) layer->delta.x[i] *= layer->df(layer->node.x, i, nodes); // now pass that signal upstream __for__(auto &link __in__ layer->backward_linked_layers) // --- 50% of time this loop { base_layer *p_top = link.second; // note all the delta[connections[i].second] should have been calculated by time we get here layer->distribute_delta(*p_top, *W[link.first]); } } // update weights - shouldn't matter the direction we update these // we can stay in backwards direction... // it was not faster to combine distribute_delta and increment_w into the same loop int size_W = (int)W.size(); dW_sets[my_batch_index].resize(size_W); dbias_sets[my_batch_index].resize(layer_cnt); for (int k = last_layer_index; k >= 0; k--) { layer = layer_sets[thread_number][k]; __for__(auto &link __in__ layer->backward_linked_layers) { base_layer *p_top = link.second; int w_index = (int)link.first; //if (dynamic_cast<max_pooling_layer*> (layer) != NULL) continue; layer->calculate_dw(*p_top, dW_sets[my_batch_index][w_index]);// --- 20% // moved this out to sync_mini_batch(); //_solver->increment_w( W[w_index],w_index, dW_sets[_batch_index][w_index]); // -- 10% } if (dynamic_cast<convolution_layer*> (layer) != NULL) continue; dbias_sets[my_batch_index][k] = layer->delta; } // if all batches finished, update weights lock_batch(); batch_open[my_batch_index] = BATCH_COMPLETE; int next_index = get_next_open_batch(); if (next_index == BATCH_FILLED_COMPLETE) // all complete sync_mini_batch(); // resets _batch_index to 0 unlock_batch(); } mojo::matrix make_input(float *in, const int _thread_number) { mojo::matrix augmented_input;// = auto_augmentation(); std::vector<base_layer *> inputs; int in_size = 0; __for__(auto layer __in__ layer_sets[_thread_number]) { if (dynamic_cast<input_layer*> (layer) != NULL) { inputs.push_back(layer); in_size += layer->node.size(); } } if (use_augmentation > 0) { augmented_input.resize(in_size, 1, 1); float s = ((float)(rand() % 101) / 50.f - 1.f)*augment_scale; float t = ((float)(rand() % 101) / 50.f - 1.f)*augment_theta; bool flip_h = ((rand() % 2)*augment_h_flip) ? true: false; bool flip_v = ((rand() % 2)*augment_v_flip) ? 
true: false; int shift_x = (rand() % (augment_x * 2 + 1)) - augment_x; int shift_y = (rand() % (augment_y * 2 + 1)) - augment_y; int offset = 0; __for__(auto layer __in__ inputs) { //memcpy(layer->node.x, in_ptr, sizeof(float)*layer->node.size()); //in_ptr += layer->node.size(); // copy input to matrix type mojo::matrix m(layer->node.cols, layer->node.rows, layer->node.chans, in + offset); if (m.rows > 1 && m.cols > 1) { #if defined(MOJO_CV2) || defined(MOJO_CV3) if ((augment_theta > 0 || augment_scale > 0)) m = transform(m, m.cols / 2, m.rows / 2, m.cols, t, 1 + s); #endif if (flip_v)m = m.flip_cols(); if (flip_h) m = m.flip_rows(); mojo::matrix aug = m.shift(shift_x, shift_y, augment_pad); memcpy(augmented_input.x + offset, aug.x, sizeof(float)*aug.size()); offset += aug.size(); } else { memcpy(augmented_input.x + offset, m.x, sizeof(float)*m.size()); offset += m.size(); } } // input = augmented_input.x; } else { augmented_input.resize(in_size, 1, 1); memcpy(augmented_input.x, in, sizeof(float)*in_size); } return augmented_input; } //---------------------------------------------------------------------------------------------------------- // T R A I N C L A S S // // after starting epoch, call this to train against a class label // label_index must be 0 to out_size()-1 // for thread safety, you must pass in the thread_index if calling from different threads bool train_class(float *in, int label_index, int _thread_number = -1) { if (_solver == NULL) bail("set solver"); if (_thread_number < 0) _thread_number = get_thread_num(); if (_thread_number > _thread_count) bail("call allow_threads()"); const int thread_number = _thread_number; /* mojo::matrix augmented_input = make_input(in, thread_number); /*/ float *input = in; mojo::matrix augmented_input; if (use_augmentation > 0) { //augment_h_flip = flip_h; //augment_v_flip = flip_v; // copy input to matrix type mojo::matrix m(layer_sets[thread_number][0]->node.cols, layer_sets[thread_number][0]->node.rows, layer_sets[thread_number][0]->node.chans, in); #if defined(MOJO_CV2) || defined(MOJO_CV3) if (augment_theta > 0 || augment_scale > 0) { float s = ((float)(rand() % 101) / 50.f - 1.f)*augment_scale; float t = ((float)(rand() % 101) / 50.f - 1.f)*augment_theta; m = transform(m, m.cols / 2, m.rows / 2, m.cols, t, 1+s); } #endif if (augment_h_flip) if ((rand() % 2) == 0) m = m.flip_cols(); if (augment_v_flip) if ((rand() % 2) == 0) m = m.flip_rows(); augmented_input = m.shift((rand() % (augment_x * 2 + 1)) - augment_x, (rand() % (augment_y * 2 + 1)) - augment_y, augment_pad); input = augmented_input.x; } //*/ // get next free mini_batch slot // this is tied to the current state of the model int my_batch_index = reserve_next_batch(); // out of data or an error if index is negative if (my_batch_index < 0) return false; // run through forward to get nodes activated forward(input, thread_number, 1); // set all deltas to zero __for__(auto layer __in__ layer_sets[thread_number]) layer->delta.fill(0.f); int layer_cnt = (int)layer_sets[thread_number].size(); // calc delta for last layer to prop back up through network // d = (target-out)* grad_activiation(out) const int last_layer_index = layer_cnt - 1; base_layer *layer = layer_sets[thread_number][last_layer_index]; const int layer_node_size = layer->node.size(); const int layer_delta_size = layer->delta.size(); if (dynamic_cast<dropout_layer*> (layer) != NULL) bail("can't have dropout on last layer"); float E = 0; int max_j_out = 0; int max_j_target = label_index; // was passing this in, but may as 
well just create it on the fly // a vector mapping the label index to the desired target output node values // all -1 except target node 1 std::vector<float> target; if((std::string("sigmoid").compare(layer->p_act->name) == 0) || (std::string("softmax").compare(layer->p_act->name) == 0)|| (std::string("brokemax").compare(layer->p_act->name) == 0)) target = std::vector<float>(layer_node_size, 0); else target = std::vector<float>(layer_node_size, -1); if(label_index>=0 && label_index<layer_node_size) target[label_index] = 1; //const float grad_fudge = 1.0f; // because of numerator/demoninator cancellations which prevent a divide by zero issue, // we need to handle some things special on output layer float cost_activation_type = 0; if ((std::string("sigmoid").compare(layer->p_act->name) == 0) && (std::string("cross_entropy").compare(_cost_function->name) == 0)) cost_activation_type = 1; else if ((std::string("softmax").compare(layer->p_act->name) == 0) && (std::string("cross_entropy").compare(_cost_function->name) == 0)) cost_activation_type = 1; else if ((std::string("tanh").compare(layer->p_act->name) == 0) && (std::string("cross_entropy").compare(_cost_function->name) == 0)) cost_activation_type = 4; for (int j = 0; j < layer_node_size; j++) { if(cost_activation_type>0) layer->delta.x[j] = cost_activation_type*(layer->node.x[j]- target[j]); else layer->delta.x[j] = _cost_function->d_cost(layer->node.x[j], target[j])*layer->df(layer->node.x, j, layer_node_size); // pick best response if (layer->node.x[max_j_out] < layer->node.x[j]) max_j_out = j; // for better E maybe just look at 2 highest scores so zeros don't dominate float f= mse::cost(layer->node.x[j], target[j]); E += f;//mse::cost(layer->node.x[j], target[j]); } E /= (float)layer_node_size; // check for NAN if (E != E) bail("network blew up - try lowering learning rate\n"); // critical section in here, blocking update bool match = false; if ((max_j_target == max_j_out)) match = true; update_smart_train(E, match); if (E>0 && E<_skip_energy_level && _smart_train && match) { lock_batch(); batch_open[my_batch_index] = BATCH_FREE; unlock_batch(); return false; // return without doing training } backward_hidden(my_batch_index, thread_number); return true; } //---------------------------------------------------------------------------------------------------------- // T R A I N T A R G E T // // after starting epoch, call this to train against a target vector // for thread safety, you must pass in the thread_index if calling from different threads // if positive=1, goal is to minimize the distance between in and target bool train_target(float *in, float *target, int positive=1, int _thread_number = -1) { if (_solver == NULL) bail("set solver"); if (_thread_number < 0) _thread_number = get_thread_num(); if (_thread_number > _thread_count) bail("need to enable OMP"); const int thread_number = _thread_number; mojo::matrix augmented_input = make_input(in, thread_number); float *input = augmented_input.x; // get next free mini_batch slot // this is tied to the current state of the model int my_batch_index = reserve_next_batch(); // out of data or an error if index is negative if (my_batch_index < 0) return false; // run through forward to get nodes activated float *out=forward(in, thread_number, 1); // set all deltas to zero __for__(auto layer __in__ layer_sets[thread_number]) layer->delta.fill(0.f); int layer_cnt = (int)layer_sets[thread_number].size(); // calc delta for last layer to prop back up through network // d = (target-out)* 
grad_activiation(out) const int last_layer_index = layer_cnt - 1; base_layer *layer = layer_sets[thread_number][last_layer_index]; const int layer_node_size = layer->node.size(); if (dynamic_cast<dropout_layer*> (layer) != NULL) bail("can't have dropout on last layer"); float E = 0; int max_j_out = 0; //int max_j_target = label_index; // was passing this in, but may as well just create it on the fly // a vector mapping the label index to the desired target output node values // all -1 except target node 1 // std::vector<float> target; //if ((std::string("sigmoid").compare(layer->p_act->name) == 0) || (std::string("softmax").compare(layer->p_act->name) == 0)) // target = std::vector<float>(layer_node_size, 0); // else // target = std::vector<float>(layer_node_size, -1); // if (label_index >= 0 && label_index<layer_node_size) target[label_index] = 1; const float grad_fudge = 1.0f; // because of numerator/demoninator cancellations which prevent a divide by zero issue, // we need to handle some things special on output layer float cost_activation_type = 0; if ((std::string("sigmoid").compare(layer->p_act->name) == 0) && (std::string("cross_entropy").compare(_cost_function->name) == 0)) cost_activation_type = 1; else if ((std::string("softmax").compare(layer->p_act->name) == 0) && (std::string("cross_entropy").compare(_cost_function->name) == 0)) cost_activation_type = 1; else if ((std::string("brokemax").compare(layer->p_act->name) == 0) && (std::string("cross_entropy").compare(_cost_function->name) == 0)) cost_activation_type = 1; else if ((std::string("tanh").compare(layer->p_act->name) == 0) && (std::string("cross_entropy").compare(_cost_function->name) == 0)) cost_activation_type = 4; for (int j = 0; j < layer_node_size; j++) { if (positive) // want to minimize distance { if (cost_activation_type > 0) layer->delta.x[j] = grad_fudge*cost_activation_type*(layer->node.x[j] - target[j]); else layer->delta.x[j] = grad_fudge*_cost_function->d_cost(layer->node.x[j], target[j])*layer->df(layer->node.x, j, layer_node_size); } else { if (cost_activation_type > 0) layer->delta.x[j] = grad_fudge*cost_activation_type*(1.f-abs(layer->node.x[j] - target[j])); else layer->delta.x[j] = grad_fudge*(1.f-abs(_cost_function->d_cost(layer->node.x[j], target[j])))*layer->df(layer->node.x, j, layer_node_size); } // pick best response if (layer->node.x[max_j_out] < layer->node.x[j]) max_j_out = j; // for better E maybe just look at 2 highest scores so zeros don't dominate // L2 distance x 2 E += mse::cost(layer->node.x[j], target[j]); } E /= (float)layer_node_size; // check for NAN if (E != E) bail("network blew up - try lowering learning rate\n"); // critical section in here, blocking update bool match = false; // FIxME if ((max_j_target == max_j_out)) match = true; if (E < 0.01 && positive) match = true; else if (E > 0.1 && !positive) match = true; update_smart_train(E, match); if (E>0 && E<_skip_energy_level && _smart_train && match) { lock_batch(); batch_open[my_batch_index] = BATCH_FREE; unlock_batch(); return false; // return without doing training } backward_hidden(my_batch_index, thread_number); return true; } #else float get_learning_rate() {return 0;} void set_learning_rate(float alpha) {} void train(float *in, float *target){} void reset() {} float get_smart_train_level() {return 0;} void set_smart_train_level(float _level) {} bool get_smart_train() { return false; } void set_smart_train(bool _use) {} #endif }; }
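To make the training flow above concrete, here is a minimal usage sketch of the network class. It is not part of network.h: the solver name, loss name, and layer configuration strings are assumptions about what new_solver(), new_cost_function(), and new_layer() accept (their grammars live in solver.h, cost.h, and layer.h, which are not shown here), and it assumes the header is saved as "network.h" and MOJO_NO_TRAINING is not defined.

// Minimal usage sketch (hypothetical); config strings and names below are
// illustrative, not confirmed by this file.
#include "network.h"

int train_one_epoch(float *images, int *labels, int sample_count, int sample_size)
{
    mojo::network cnn("adam");              // solver name forwarded to new_solver()
    cnn.enable_external_threads();          // sizes layer_sets; falls back to one thread without MOJO_OMP
    cnn.push_back("I1", "input 28 28 1");   // hypothetical layer config strings
    cnn.push_back("FC1", "fully_connected 10 softmax");
    cnn.connect_all();                      // connect layers in push_back order
    cnn.set_mini_batch_size(24);
    cnn.set_smart_training(true);

    cnn.start_epoch("cross_entropy");
    for (int i = 0; i < sample_count; i++)
        cnn.train_class(images + (size_t)i * sample_size, labels[i]);
    cnn.end_epoch();

    // after training, forward one sample and return the winning class index
    return cnn.predict_class(images);
}

The calls mirror the loop the class expects: start_epoch() resets the smart-train statistics, train_class() reserves a mini-batch slot, runs forward() and backward_hidden(), and end_epoch() flushes any partially filled mini-batch through sync_mini_batch().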
version3_2.c
// Compile with: // // // To specify the number of bodies in the world, the program optionally accepts // an integer as its first command line argument. #include <time.h> #include <sys/times.h> #include <math.h> #include <string.h> #include <stdlib.h> #include <stdio.h> #include <X11/Xlib.h> #include <unistd.h> #include "omp.h" #define WIDTH 1024 #define HEIGHT 768 // default number of bodies #define DEF_NUM_BODIES 2000 // gravitational constant #define GRAV 10.0 // initial velocities are scaled by this value #define V_SCALAR 20.0 // initial masses are scaled by this value #define M_SCALAR 5.0 // radius scalar #define R_SCALAR 3 // coefficient of restitution determines the elasticity of a collision: C_REST = [0,1] // if C_REST = 0 -> perfectly inelastic (particles stick together) // if C_REST = 1 -> perfectly elastic (no loss of speed) #define C_REST 0.5 // set the iteration times #define iteration_times 100 // Must set 0 if run on Pi #define NOT_RUN_ON_PI 1 #define TREERATIO 3 struct body { double x, y; // position double vx, vy; // velocity double ax, ay; //accelerate double m; // mass double r; // radius of the particle }; struct world { struct body *bodies; int num_bodies; }; clock_t total_time = 0; //total_time.sec = 0; //total_time.usec = 0; double max (double a, double b) {return (a > b ? a : b);} double min (double a, double b) {return (a < b ? a : b);} struct node { struct body * bodyp; struct node * q1; struct node * q2; struct node * q3; struct node * q4; double totalmass; double centerx, centery; double xmin, xmax; double ymin, ymax; double diag; }; enum quadrant { q1, q2, q3, q4 }; enum quadrant getquadrant(double x, double y, double xmin, double xmax, double ymin, double ymax) //makes a rectangle with bounds of xmin,xmax,ymin,ymax, and returns the quadrant that (x,y) is in { double midx, midy; midx = xmin + 0.5*(xmax - xmin); midy = ymin + 0.5*(ymax - ymin); if(y > midy) { if(x > midx) return q1; else return q2; } else { if(x > midx) return q4; else return q3; } } struct node * createnode(struct body * bodyp, double xmin, double xmax, double ymin, double ymax) //creates a leaf node to insert into the tree { struct node * rootnode; if(!(rootnode=malloc(sizeof(struct node)))) { printf("Unable to allocate node, exit"); return 0; } rootnode->totalmass = bodyp->m; rootnode->centerx = bodyp->x; rootnode->centery = bodyp->y; rootnode->xmin = xmin; rootnode->xmax = xmax; rootnode->ymin = ymin; rootnode->ymax = ymax; rootnode->diag = sqrt(( pow(xmax - xmin, 2) + pow(ymax - ymin, 2) )); rootnode->bodyp = bodyp; rootnode->q1 = NULL; rootnode->q2 = NULL; rootnode->q3 = NULL; rootnode->q4 = NULL; return rootnode; } void updatecenterofmass(struct node * nodep, struct body * bodyp) //updates the center of mass after inserting a point into a branch { nodep->centerx = (nodep->totalmass*nodep->centerx + bodyp->m*bodyp->x)/(nodep->totalmass + bodyp->m); nodep->centery = (nodep->totalmass*nodep->centery + bodyp->m*bodyp->y)/(nodep->totalmass + bodyp->m); nodep->totalmass += bodyp->m; return; } void insertbody(struct body * insbody, struct node * nodep) //inserts a body into the tree, converting leaf nodes into branches if necessary { enum quadrant existingquad, newquad; double xmid, ymid; xmid = nodep->xmin + 0.5*(nodep->xmax - nodep->xmin); ymid = nodep->ymin + 0.5*(nodep->ymax - nodep->ymin); if(nodep->bodyp != NULL) //if the node is a leaf convert to a branch by inserting the leaf point into one of its subquadrants { existingquad = getquadrant(nodep->bodyp->x, nodep->bodyp->y, nodep->xmin, 
nodep->xmax, nodep->ymin, nodep->ymax); switch (existingquad) { case q1: nodep->q1 = createnode(nodep->bodyp, xmid, nodep->xmax, ymid, nodep->ymax); break; case q2: nodep->q2 = createnode(nodep->bodyp, nodep->xmin, xmid, ymid, nodep->ymax); break; case q3: nodep->q3 = createnode(nodep->bodyp, nodep->xmin, xmid, nodep->ymin, ymid); break; case q4: nodep->q4 = createnode(nodep->bodyp, xmid, nodep->xmax, nodep->ymin, ymid); break; } nodep->bodyp = NULL; } newquad = getquadrant(insbody->x, insbody->y, nodep->xmin, nodep->xmax, nodep->ymin, nodep->ymax); updatecenterofmass(nodep,insbody); switch (newquad) //insert the new point into one of the quadrants if empty, otherwise recurse deeper into tree { case q1: if(nodep->q1 == NULL) { nodep->q1 = createnode(insbody, xmid, nodep->xmax, ymid, nodep->ymax); } else { insertbody(insbody,nodep->q1); } break; case q2: if(nodep->q2 == NULL) { nodep->q2 = createnode(insbody, nodep->xmin, xmid, ymid, nodep->ymax); } else { insertbody(insbody,nodep->q2); } break; case q3: if(nodep->q3 == NULL) { nodep->q3 = createnode(insbody, nodep->xmin, xmid, nodep->ymin, ymid); } else { insertbody(insbody,nodep->q3); } break; case q4: if(nodep->q4 == NULL) { nodep->q4 = createnode(insbody, xmid, nodep->xmax, nodep->ymin, ymid); } else { insertbody(insbody,nodep->q4); } break; } } void treesum(struct node * nodep, struct body * bodyp, double ratiothreshold ) //sum the forces on body bodyp from points in tree with root node nodep { double dx, dy, r, rsqr; //x distance, y distance, distance, distance^2 double accel; double a_over_r; dx = nodep->centerx - bodyp->x; dy = nodep->centery - bodyp->y; rsqr = pow(dx,2) + pow(dy,2); r = sqrt(rsqr); if(r < 25){ r = 25; } if( (((r/nodep->diag) > ratiothreshold) || (nodep->bodyp))&&(nodep->bodyp!=bodyp) ) { accel = (10.0) * nodep->totalmass / r/r/r; bodyp->ax += accel*dx; bodyp->ay += accel*dy; } else { if(nodep->q1) { treesum(nodep->q1, bodyp, ratiothreshold); } if(nodep->q2) { treesum(nodep->q2, bodyp, ratiothreshold); } if(nodep->q3) { treesum(nodep->q3, bodyp, ratiothreshold); } if(nodep->q4) { treesum(nodep->q4, bodyp, ratiothreshold); } } return; } void destroytree(struct node * nodep) { if(nodep != NULL) { if(nodep->q1 != NULL) { destroytree(nodep->q1); } if(nodep->q2 != NULL) { destroytree(nodep->q2); } if(nodep->q3 != NULL) { destroytree(nodep->q3); } if(nodep->q4 != NULL) { destroytree(nodep->q4); } free(nodep); } } /* This function initializes each particle's mass, velocity and position */ struct world* create_world(int num_bodies) { struct world *world = malloc(sizeof(struct world)); world->num_bodies = num_bodies; world->bodies = malloc(sizeof(struct body)*num_bodies); int i = 0; double x; double y; double rc; int min_dim = (WIDTH < HEIGHT) ? WIDTH : HEIGHT; while (i<num_bodies) { x = drand48() * WIDTH; y = drand48() * HEIGHT; rc = sqrt((WIDTH/2-x)*(WIDTH/2-x) + (y-HEIGHT/2)*(y-HEIGHT/2)); if (rc <= min_dim/2) { world->bodies[i].x = x; world->bodies[i].y = y; world->bodies[i].vx = V_SCALAR * (y-HEIGHT/2) / rc; world->bodies[i].vy = V_SCALAR * (WIDTH/2-x) / rc; world->bodies[i].ax = 0; world->bodies[i].ay = 0; world->bodies[i].m = (1 / (0.025 + drand48())) * M_SCALAR; world->bodies[i].r = sqrt(world->bodies[i].m / M_PI) * R_SCALAR; i++; } } return world; } // set the foreground color given RGB values between 0..255. 
void set_color(Display *disp, GC gc, int r, int g, int b){ unsigned long int p ; if (r < 0) r = 0; else if (r > 255) r = 255; if (g < 0) g = 0; else if (g > 255) g = 255; if (b < 0) b = 0; else if (b > 255) b = 255; p = (r << 16) | (g << 8) | (b) ; XSetForeground(disp, gc, p) ; } /* This function updates the screen with the new positions of each particle */ void draw_world(Display *disp, Pixmap back_buf, GC gc, struct world *world) { int i; double x, y, r, r2; // we turn off aliasing for faster draws set_color(disp, gc, 255, 255, 255); XFillRectangle(disp, back_buf, gc, 0, 0, WIDTH, HEIGHT); for (i = 0; i < world->num_bodies; i++) { r = world->bodies[i].r; x = world->bodies[i].x - r; y = world->bodies[i].y - r; r2 = r + r; // draw body set_color(disp, gc, 255*7/10, 255*7/10, 255*7/10); XFillArc(disp, back_buf, gc, x, y, r2, r2, 0, 360*64); set_color(disp, gc, 0, 0, 0); XDrawArc(disp, back_buf, gc, x, y, r2, r2, 0, 360*64); } } void collision_step(struct world *world) { int a, b; double r, x, y, vx, vy; // Impose screen boundaries by reversing direction if body is off screen for (a = 0; a < world->num_bodies; a++) { r = world->bodies[a].r; x = world->bodies[a].x; y = world->bodies[a].y; vx = world->bodies[a].vx; vy = world->bodies[a].vy; if (x-r < 0) { // left edge if (vx < 0) { world->bodies[a].vx = -C_REST * vx; } world->bodies[a].x = r; } else if (x+r > WIDTH) { // right edge if (vx > 0) { world->bodies[a].vx = -C_REST * vx; } world->bodies[a].x = WIDTH - r; } if (y-r < 0) { // bottom edge if (vy < 0) { world->bodies[a].vy = -C_REST * vy; } world->bodies[a].y = r; } else if (y+r > HEIGHT) { // top edge if (vy > 0) { world->bodies[a].vy = -C_REST * vy; } world->bodies[a].y = HEIGHT - r; } } } void position_step(struct world *world, double time_res){ struct node * rootnode; //struct body * bodies = world->bodies; //int nbodies = world->num_bodies; double xmin, xmax, ymin, ymax; xmin = 0.0; xmax = 0.0; ymin = 0.0; ymax = 0.0; for(int i = 0; i < world->num_bodies; i++) //reset accel { world->bodies[i].ax = 0.0; world->bodies[i].ay = 0.0; xmin=min(xmin,world->bodies[i].x); xmax=max(xmax,world->bodies[i].x); ymin=min(ymin,world->bodies[i].y); ymax=max(ymax,world->bodies[i].y); } rootnode = createnode(world->bodies+0,xmin,xmax,ymin,ymax); //rootnode = createnode(bodies+0,0,WIDTH,0,HEIGHT); for(int i = 1; i < world->num_bodies; i++) { insertbody(world->bodies+i, rootnode); } #pragma omp parallel { #pragma omp for for(int i = 0; i < world->num_bodies; i++) //sum accel { treesum(rootnode, world->bodies+i, TREERATIO); } #pragma omp for for(int i = 0; i < world->num_bodies; i++) { //Update velocities world->bodies[i].vx += world->bodies[i].ax * time_res; world->bodies[i].vy += world->bodies[i].ay * time_res; //Update positions world->bodies[i].x += world->bodies[i].vx * time_res; world->bodies[i].y += world->bodies[i].vy * time_res; } } destroytree(rootnode); } void step_world(struct world *world, double time_res) { struct tms ttt; clock_t start, end; start = times(&ttt); position_step(world, time_res); end = times(&ttt); total_time += end - start; collision_step(world); } /* Main method runs initialize() and update() */ int main(int argc, char **argv) { //total_time.tv_sec = 0; //total_time.tv_usec = 0; /* get num bodies from the command line */ int num_bodies; num_bodies = (argc == 2) ? 
atoi(argv[1]) : DEF_NUM_BODIES; printf("Universe has %d bodies.\n", num_bodies); omp_set_num_threads(8); /* set up the universe */ time_t cur_time; time(&cur_time); srand48((long)cur_time); // seed the RNG used in create_world struct world *world = create_world(num_bodies); /* set up graphics using Xlib */ #if NOT_RUN_ON_PI Display *disp = XOpenDisplay(NULL); int scr = DefaultScreen(disp); Window win = XCreateSimpleWindow( disp, RootWindow(disp, scr), 0, 0, WIDTH, HEIGHT, 0, BlackPixel(disp, scr), WhitePixel(disp, scr)); XStoreName(disp, win, "N-Body Simulator"); Pixmap back_buf = XCreatePixmap(disp, RootWindow(disp, scr), WIDTH, HEIGHT, DefaultDepth(disp, scr)); GC gc = XCreateGC(disp, back_buf, 0, 0); // Make sure we're only looking for messages about closing the window Atom del_window = XInternAtom(disp, "WM_DELETE_WINDOW", 0); XSetWMProtocols(disp, win, &del_window, 1); XSelectInput(disp, win, StructureNotifyMask); XMapWindow(disp, win); XEvent event; // wait until window is mapped while (1) { XNextEvent(disp, &event); if (event.type == MapNotify) { break; } } #endif struct timespec delay={0, 1000000000 / 60}; // for 60 FPS struct timespec remaining; double delta_t = 0.1; int ii; for(ii = 0; ii < iteration_times; ii++){ // check if the window has been closed #if NOT_RUN_ON_PI if (XCheckTypedEvent(disp, ClientMessage, &event)) { break; } // we first draw to the back buffer then copy it to the front (`win`) draw_world(disp, back_buf, gc, world); XCopyArea(disp, back_buf, win, gc, 0, 0, WIDTH, HEIGHT, 0, 0); #endif step_world(world, delta_t); //if you want to watch the process in 60 FPS //nanosleep(&delay, &remaining); } // printf("Total Time = %f\n", (double)total_time.tv_sec + (double)total_time.tv_usec/1000000); printf("Nbody Position Calculation Time = :%lf s\n",(double)total_time / (sysconf(_SC_CLK_TCK))); #if NOT_RUN_ON_PI XFreeGC(disp, gc); XFreePixmap(disp, back_buf); XDestroyWindow(disp, win); XCloseDisplay(disp); #endif return 0; }
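As a reference for how the quadtree routines above compose, here is a small driver sketch. demo_treesum() is a hypothetical helper that is not in the original file; it assumes it is compiled in the same translation unit, after the definitions above, so it can reuse struct body, createnode(), insertbody(), treesum(), and destroytree().

/* Hypothetical helper, not part of version3_2.c: builds a tiny Barnes-Hut
   tree and accumulates accelerations, mirroring what position_step() does
   for the full body list. */
void demo_treesum(void)
{
    struct body b[3];
    for (int i = 0; i < 3; i++)
    {
        b[i].x = 100.0 * (i + 1);   /* positions inside the 0..WIDTH x 0..HEIGHT box */
        b[i].y = 50.0 * (i + 1);
        b[i].vx = b[i].vy = 0.0;
        b[i].ax = b[i].ay = 0.0;
        b[i].m = 10.0;
        b[i].r = 1.0;
    }

    /* the root's bounding box must contain every body that gets inserted */
    struct node *root = createnode(&b[0], 0.0, WIDTH, 0.0, HEIGHT);
    for (int i = 1; i < 3; i++)
        insertbody(&b[i], root);

    /* TREERATIO is the opening criterion: a cell is summed as a single point
       mass only when distance / cell diagonal exceeds this ratio; otherwise
       treesum() recurses into the cell's quadrants */
    for (int i = 0; i < 3; i++)
        treesum(root, &b[i], TREERATIO);

    printf("body 0 accel = (%f, %f)\n", b[0].ax, b[0].ay);
    destroytree(root);
}

position_step() follows the same pattern, except it computes the bounding box from the actual body coordinates and parallelizes the treesum and integration loops with OpenMP.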
sstruct_matrix.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Member functions for hypre_SStructPMatrix class. * *****************************************************************************/ #include "_hypre_sstruct_mv.h" #include "_hypre_struct_mv.hpp" /*========================================================================== * SStructPMatrix routines *==========================================================================*/ /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixRef( hypre_SStructPMatrix *matrix, hypre_SStructPMatrix **matrix_ref ) { hypre_SStructPMatrixRefCount(matrix) ++; *matrix_ref = matrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixCreate( MPI_Comm comm, hypre_SStructPGrid *pgrid, hypre_SStructStencil **stencils, hypre_SStructPMatrix **pmatrix_ptr ) { hypre_SStructPMatrix *pmatrix; HYPRE_Int nvars; HYPRE_Int **smaps; hypre_StructStencil ***sstencils; hypre_StructMatrix ***smatrices; HYPRE_Int **symmetric; hypre_StructStencil *sstencil; HYPRE_Int *vars; hypre_Index *sstencil_shape; HYPRE_Int sstencil_size; HYPRE_Int new_dim; HYPRE_Int *new_sizes; hypre_Index **new_shapes; HYPRE_Int size; hypre_StructGrid *sgrid; HYPRE_Int vi, vj; HYPRE_Int i, j, k; pmatrix = hypre_TAlloc(hypre_SStructPMatrix, 1, HYPRE_MEMORY_HOST); hypre_SStructPMatrixComm(pmatrix) = comm; hypre_SStructPMatrixPGrid(pmatrix) = pgrid; hypre_SStructPMatrixStencils(pmatrix) = stencils; nvars = hypre_SStructPGridNVars(pgrid); hypre_SStructPMatrixNVars(pmatrix) = nvars; /* create sstencils */ smaps = hypre_TAlloc(HYPRE_Int *, nvars, HYPRE_MEMORY_HOST); sstencils = hypre_TAlloc(hypre_StructStencil **, nvars, HYPRE_MEMORY_HOST); new_sizes = hypre_TAlloc(HYPRE_Int, nvars, HYPRE_MEMORY_HOST); new_shapes = hypre_TAlloc(hypre_Index *, nvars, HYPRE_MEMORY_HOST); size = 0; for (vi = 0; vi < nvars; vi++) { sstencils[vi] = hypre_TAlloc(hypre_StructStencil *, nvars, HYPRE_MEMORY_HOST); for (vj = 0; vj < nvars; vj++) { sstencils[vi][vj] = NULL; new_sizes[vj] = 0; } sstencil = hypre_SStructStencilSStencil(stencils[vi]); vars = hypre_SStructStencilVars(stencils[vi]); sstencil_shape = hypre_StructStencilShape(sstencil); sstencil_size = hypre_StructStencilSize(sstencil); smaps[vi] = hypre_TAlloc(HYPRE_Int, sstencil_size, HYPRE_MEMORY_HOST); for (i = 0; i < sstencil_size; i++) { j = vars[i]; new_sizes[j]++; } for (vj = 0; vj < nvars; vj++) { if (new_sizes[vj]) { new_shapes[vj] = hypre_TAlloc(hypre_Index, new_sizes[vj], HYPRE_MEMORY_HOST); new_sizes[vj] = 0; } } for (i = 0; i < sstencil_size; i++) { j = vars[i]; k = new_sizes[j]; hypre_CopyIndex(sstencil_shape[i], new_shapes[j][k]); smaps[vi][i] = k; new_sizes[j]++; } new_dim = hypre_StructStencilNDim(sstencil); for (vj = 0; vj < nvars; vj++) { if (new_sizes[vj]) { sstencils[vi][vj] = hypre_StructStencilCreate(new_dim, new_sizes[vj], new_shapes[vj]); } size = hypre_max(size, new_sizes[vj]); } } 
hypre_SStructPMatrixSMaps(pmatrix) = smaps; hypre_SStructPMatrixSStencils(pmatrix) = sstencils; hypre_TFree(new_sizes, HYPRE_MEMORY_HOST); hypre_TFree(new_shapes, HYPRE_MEMORY_HOST); /* create smatrices */ smatrices = hypre_TAlloc(hypre_StructMatrix **, nvars, HYPRE_MEMORY_HOST); for (vi = 0; vi < nvars; vi++) { smatrices[vi] = hypre_TAlloc(hypre_StructMatrix *, nvars, HYPRE_MEMORY_HOST); for (vj = 0; vj < nvars; vj++) { smatrices[vi][vj] = NULL; if (sstencils[vi][vj] != NULL) { sgrid = hypre_SStructPGridSGrid(pgrid, vi); smatrices[vi][vj] = hypre_StructMatrixCreate(comm, sgrid, sstencils[vi][vj]); } } } hypre_SStructPMatrixSMatrices(pmatrix) = smatrices; /* create symmetric */ symmetric = hypre_TAlloc(HYPRE_Int *, nvars, HYPRE_MEMORY_HOST); for (vi = 0; vi < nvars; vi++) { symmetric[vi] = hypre_TAlloc(HYPRE_Int, nvars, HYPRE_MEMORY_HOST); for (vj = 0; vj < nvars; vj++) { symmetric[vi][vj] = 0; } } hypre_SStructPMatrixSymmetric(pmatrix) = symmetric; hypre_SStructPMatrixSEntriesSize(pmatrix) = size; hypre_SStructPMatrixSEntries(pmatrix) = hypre_TAlloc(HYPRE_Int, size, HYPRE_MEMORY_HOST); hypre_SStructPMatrixRefCount(pmatrix) = 1; *pmatrix_ptr = pmatrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixDestroy( hypre_SStructPMatrix *pmatrix ) { hypre_SStructStencil **stencils; HYPRE_Int nvars; HYPRE_Int **smaps; hypre_StructStencil ***sstencils; hypre_StructMatrix ***smatrices; HYPRE_Int **symmetric; HYPRE_Int vi, vj; if (pmatrix) { hypre_SStructPMatrixRefCount(pmatrix) --; if (hypre_SStructPMatrixRefCount(pmatrix) == 0) { stencils = hypre_SStructPMatrixStencils(pmatrix); nvars = hypre_SStructPMatrixNVars(pmatrix); smaps = hypre_SStructPMatrixSMaps(pmatrix); sstencils = hypre_SStructPMatrixSStencils(pmatrix); smatrices = hypre_SStructPMatrixSMatrices(pmatrix); symmetric = hypre_SStructPMatrixSymmetric(pmatrix); for (vi = 0; vi < nvars; vi++) { HYPRE_SStructStencilDestroy(stencils[vi]); hypre_TFree(smaps[vi], HYPRE_MEMORY_HOST); for (vj = 0; vj < nvars; vj++) { hypre_StructStencilDestroy(sstencils[vi][vj]); hypre_StructMatrixDestroy(smatrices[vi][vj]); } hypre_TFree(sstencils[vi], HYPRE_MEMORY_HOST); hypre_TFree(smatrices[vi], HYPRE_MEMORY_HOST); hypre_TFree(symmetric[vi], HYPRE_MEMORY_HOST); } hypre_TFree(stencils, HYPRE_MEMORY_HOST); hypre_TFree(smaps, HYPRE_MEMORY_HOST); hypre_TFree(sstencils, HYPRE_MEMORY_HOST); hypre_TFree(smatrices, HYPRE_MEMORY_HOST); hypre_TFree(symmetric, HYPRE_MEMORY_HOST); hypre_TFree(hypre_SStructPMatrixSEntries(pmatrix), HYPRE_MEMORY_HOST); hypre_TFree(pmatrix, HYPRE_MEMORY_HOST); } } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixInitialize( hypre_SStructPMatrix *pmatrix ) { HYPRE_Int nvars = hypre_SStructPMatrixNVars(pmatrix); HYPRE_Int **symmetric = hypre_SStructPMatrixSymmetric(pmatrix); hypre_StructMatrix *smatrix; HYPRE_Int vi, vj; /* HYPRE_Int num_ghost[2*HYPRE_MAXDIM]; */ /* HYPRE_Int vi, vj, d, ndim; */ #if 0 ndim = hypre_SStructPMatrixNDim(pmatrix); /* RDF: Why are the ghosts being reset to one? Maybe it needs to be at least * one to set shared coefficients correctly, but not exactly one? 
*/ for (d = 0; d < ndim; d++) { num_ghost[2*d] = num_ghost[2*d+1] = 1; } #endif for (vi = 0; vi < nvars; vi++) { for (vj = 0; vj < nvars; vj++) { smatrix = hypre_SStructPMatrixSMatrix(pmatrix, vi, vj); if (smatrix != NULL) { HYPRE_StructMatrixSetSymmetric(smatrix, symmetric[vi][vj]); /* hypre_StructMatrixSetNumGhost(smatrix, num_ghost); */ hypre_StructMatrixInitialize(smatrix); /* needed to get AddTo accumulation correct between processors */ hypre_StructMatrixClearGhostValues(smatrix); } } } hypre_SStructPMatrixAccumulated(pmatrix) = 0; return hypre_error_flag; } /*-------------------------------------------------------------------------- * (action > 0): add-to values * (action = 0): set values * (action < 0): get values *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixSetValues( hypre_SStructPMatrix *pmatrix, hypre_Index index, HYPRE_Int var, HYPRE_Int nentries, HYPRE_Int *entries, HYPRE_Complex *values, HYPRE_Int action ) { hypre_SStructStencil *stencil = hypre_SStructPMatrixStencil(pmatrix, var); HYPRE_Int *smap = hypre_SStructPMatrixSMap(pmatrix, var); HYPRE_Int *vars = hypre_SStructStencilVars(stencil); hypre_StructMatrix *smatrix; hypre_BoxArray *grid_boxes; hypre_Box *box, *grow_box; HYPRE_Int *sentries; HYPRE_Int i; smatrix = hypre_SStructPMatrixSMatrix(pmatrix, var, vars[entries[0]]); sentries = hypre_SStructPMatrixSEntries(pmatrix); for (i = 0; i < nentries; i++) { sentries[i] = smap[entries[i]]; } /* set values inside the grid */ hypre_StructMatrixSetValues(smatrix, index, nentries, sentries, values, action, -1, 0); /* set (AddTo/Get) or clear (Set) values outside the grid in ghost zones */ if (action != 0) { /* AddTo/Get */ hypre_SStructPGrid *pgrid = hypre_SStructPMatrixPGrid(pmatrix); hypre_Index varoffset; HYPRE_Int done = 0; grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix)); hypre_ForBoxI(i, grid_boxes) { box = hypre_BoxArrayBox(grid_boxes, i); if (hypre_IndexInBox(index, box)) { done = 1; break; } } if (!done) { grow_box = hypre_BoxCreate(hypre_BoxArrayNDim(grid_boxes)); hypre_SStructVariableGetOffset(hypre_SStructPGridVarType(pgrid, var), hypre_SStructPGridNDim(pgrid), varoffset); hypre_ForBoxI(i, grid_boxes) { box = hypre_BoxArrayBox(grid_boxes, i); hypre_CopyBox(box, grow_box); hypre_BoxGrowByIndex(grow_box, varoffset); if (hypre_IndexInBox(index, grow_box)) { hypre_StructMatrixSetValues(smatrix, index, nentries, sentries, values, action, i, 1); break; } } hypre_BoxDestroy(grow_box); } } else { /* Set */ grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix)); hypre_ForBoxI(i, grid_boxes) { box = hypre_BoxArrayBox(grid_boxes, i); if (!hypre_IndexInBox(index, box)) { hypre_StructMatrixClearValues(smatrix, index, nentries, sentries, i, 1); } } } return hypre_error_flag; } /*-------------------------------------------------------------------------- * (action > 0): add-to values * (action = 0): set values * (action < 0): get values * (action =-2): get values and zero out *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixSetBoxValues( hypre_SStructPMatrix *pmatrix, hypre_Box *set_box, HYPRE_Int var, HYPRE_Int nentries, HYPRE_Int *entries, hypre_Box *value_box, HYPRE_Complex *values, HYPRE_Int action ) { HYPRE_Int ndim = hypre_SStructPMatrixNDim(pmatrix); hypre_SStructStencil *stencil = hypre_SStructPMatrixStencil(pmatrix, var); HYPRE_Int *smap = hypre_SStructPMatrixSMap(pmatrix, var); HYPRE_Int *vars = 
hypre_SStructStencilVars(stencil); hypre_StructMatrix *smatrix; hypre_BoxArray *grid_boxes; HYPRE_Int *sentries; HYPRE_Int i, j; smatrix = hypre_SStructPMatrixSMatrix(pmatrix, var, vars[entries[0]]); sentries = hypre_SStructPMatrixSEntries(pmatrix); for (i = 0; i < nentries; i++) { sentries[i] = smap[entries[i]]; } /* set values inside the grid */ hypre_StructMatrixSetBoxValues(smatrix, set_box, value_box, nentries, sentries, values, action, -1, 0); /* TODO: Why need DeviceSync? */ #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP) hypre_SyncCudaDevice(hypre_handle()); #endif /* set (AddTo/Get) or clear (Set) values outside the grid in ghost zones */ if (action != 0) { /* AddTo/Get */ hypre_SStructPGrid *pgrid = hypre_SStructPMatrixPGrid(pmatrix); hypre_Index varoffset; hypre_BoxArray *left_boxes, *done_boxes, *temp_boxes; hypre_Box *left_box, *done_box, *int_box; hypre_SStructVariableGetOffset(hypre_SStructPGridVarType(pgrid, var), hypre_SStructPGridNDim(pgrid), varoffset); grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix)); left_boxes = hypre_BoxArrayCreate(1, ndim); done_boxes = hypre_BoxArrayCreate(2, ndim); temp_boxes = hypre_BoxArrayCreate(0, ndim); /* done_box always points to the first box in done_boxes */ done_box = hypre_BoxArrayBox(done_boxes, 0); /* int_box always points to the second box in done_boxes */ int_box = hypre_BoxArrayBox(done_boxes, 1); hypre_CopyBox(set_box, hypre_BoxArrayBox(left_boxes, 0)); hypre_BoxArraySetSize(left_boxes, 1); hypre_SubtractBoxArrays(left_boxes, grid_boxes, temp_boxes); hypre_BoxArraySetSize(done_boxes, 0); hypre_ForBoxI(i, grid_boxes) { hypre_SubtractBoxArrays(left_boxes, done_boxes, temp_boxes); hypre_BoxArraySetSize(done_boxes, 1); hypre_CopyBox(hypre_BoxArrayBox(grid_boxes, i), done_box); hypre_BoxGrowByIndex(done_box, varoffset); hypre_ForBoxI(j, left_boxes) { left_box = hypre_BoxArrayBox(left_boxes, j); hypre_IntersectBoxes(left_box, done_box, int_box); hypre_StructMatrixSetBoxValues(smatrix, int_box, value_box, nentries, sentries, values, action, i, 1); } } hypre_BoxArrayDestroy(left_boxes); hypre_BoxArrayDestroy(done_boxes); hypre_BoxArrayDestroy(temp_boxes); } else { /* Set */ hypre_BoxArray *diff_boxes; hypre_Box *grid_box, *diff_box; grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix)); diff_boxes = hypre_BoxArrayCreate(0, ndim); hypre_ForBoxI(i, grid_boxes) { grid_box = hypre_BoxArrayBox(grid_boxes, i); hypre_BoxArraySetSize(diff_boxes, 0); hypre_SubtractBoxes(set_box, grid_box, diff_boxes); hypre_ForBoxI(j, diff_boxes) { diff_box = hypre_BoxArrayBox(diff_boxes, j); hypre_StructMatrixClearBoxValues(smatrix, diff_box, nentries, sentries, i, 1); } } hypre_BoxArrayDestroy(diff_boxes); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixAccumulate( hypre_SStructPMatrix *pmatrix ) { hypre_SStructPGrid *pgrid = hypre_SStructPMatrixPGrid(pmatrix); HYPRE_Int nvars = hypre_SStructPMatrixNVars(pmatrix); HYPRE_Int ndim = hypre_SStructPGridNDim(pgrid); HYPRE_SStructVariable *vartypes = hypre_SStructPGridVarTypes(pgrid); hypre_StructMatrix *smatrix; hypre_Index varoffset; HYPRE_Int num_ghost[2*HYPRE_MAXDIM]; hypre_StructGrid *sgrid; HYPRE_Int vi, vj, d; hypre_CommInfo *comm_info; hypre_CommPkg *comm_pkg; hypre_CommHandle *comm_handle; /* if values already accumulated, just return */ if (hypre_SStructPMatrixAccumulated(pmatrix)) { return 
hypre_error_flag; } for (vi = 0; vi < nvars; vi++) { for (vj = 0; vj < nvars; vj++) { smatrix = hypre_SStructPMatrixSMatrix(pmatrix, vi, vj); if (smatrix != NULL) { sgrid = hypre_StructMatrixGrid(smatrix); /* assumes vi and vj vartypes are the same */ hypre_SStructVariableGetOffset(vartypes[vi], ndim, varoffset); for (d = 0; d < ndim; d++) { num_ghost[2*d] = num_ghost[2*d+1] = hypre_IndexD(varoffset, d); } /* accumulate values from AddTo */ hypre_CreateCommInfoFromNumGhost(sgrid, num_ghost, &comm_info); hypre_CommPkgCreate(comm_info, hypre_StructMatrixDataSpace(smatrix), hypre_StructMatrixDataSpace(smatrix), hypre_StructMatrixNumValues(smatrix), NULL, 1, hypre_StructMatrixComm(smatrix), &comm_pkg); hypre_InitializeCommunication(comm_pkg, hypre_StructMatrixData(smatrix), hypre_StructMatrixData(smatrix), 1, 0, &comm_handle); hypre_FinalizeCommunication(comm_handle); hypre_CommInfoDestroy(comm_info); hypre_CommPkgDestroy(comm_pkg); } } } hypre_SStructPMatrixAccumulated(pmatrix) = 1; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixAssemble( hypre_SStructPMatrix *pmatrix ) { HYPRE_Int nvars = hypre_SStructPMatrixNVars(pmatrix); hypre_StructMatrix *smatrix; HYPRE_Int vi, vj; hypre_SStructPMatrixAccumulate(pmatrix); for (vi = 0; vi < nvars; vi++) { for (vj = 0; vj < nvars; vj++) { smatrix = hypre_SStructPMatrixSMatrix(pmatrix, vi, vj); if (smatrix != NULL) { hypre_StructMatrixClearGhostValues(smatrix); hypre_StructMatrixAssemble(smatrix); } } } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixSetSymmetric( hypre_SStructPMatrix *pmatrix, HYPRE_Int var, HYPRE_Int to_var, HYPRE_Int symmetric ) { HYPRE_Int **pmsymmetric = hypre_SStructPMatrixSymmetric(pmatrix); HYPRE_Int vstart = var; HYPRE_Int vsize = 1; HYPRE_Int tstart = to_var; HYPRE_Int tsize = 1; HYPRE_Int v, t; if (var == -1) { vstart = 0; vsize = hypre_SStructPMatrixNVars(pmatrix); } if (to_var == -1) { tstart = 0; tsize = hypre_SStructPMatrixNVars(pmatrix); } for (v = vstart; v < vsize; v++) { for (t = tstart; t < tsize; t++) { pmsymmetric[v][t] = symmetric; } } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixPrint( const char *filename, hypre_SStructPMatrix *pmatrix, HYPRE_Int all ) { HYPRE_Int nvars = hypre_SStructPMatrixNVars(pmatrix); hypre_StructMatrix *smatrix; HYPRE_Int vi, vj; char new_filename[255]; for (vi = 0; vi < nvars; vi++) { for (vj = 0; vj < nvars; vj++) { smatrix = hypre_SStructPMatrixSMatrix(pmatrix, vi, vj); if (smatrix != NULL) { hypre_sprintf(new_filename, "%s.%02d.%02d", filename, vi, vj); hypre_StructMatrixPrint(new_filename, smatrix, all); } } } return hypre_error_flag; } /*========================================================================== * SStructUMatrix routines *==========================================================================*/ /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructUMatrixInitialize( hypre_SStructMatrix *matrix ) { HYPRE_Int ndim = hypre_SStructMatrixNDim(matrix); 
HYPRE_IJMatrix ijmatrix = hypre_SStructMatrixIJMatrix(matrix); HYPRE_Int matrix_type = hypre_SStructMatrixObjectType(matrix); hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix); hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph); HYPRE_Int nparts = hypre_SStructGraphNParts(graph); hypre_SStructPGrid **pgrids = hypre_SStructGraphPGrids(graph); hypre_SStructStencil ***stencils = hypre_SStructGraphStencils(graph); HYPRE_Int nUventries = hypre_SStructGraphNUVEntries(graph); HYPRE_Int *iUventries = hypre_SStructGraphIUVEntries(graph); hypre_SStructUVEntry **Uventries = hypre_SStructGraphUVEntries(graph); HYPRE_Int **nvneighbors = hypre_SStructGridNVNeighbors(grid); hypre_StructGrid *sgrid; hypre_SStructStencil *stencil; HYPRE_Int *split; HYPRE_Int nvars; HYPRE_Int nrows, rowstart, nnzs ; HYPRE_Int part, var, entry, b, m, mi; HYPRE_Int *row_sizes; HYPRE_Int max_row_size; hypre_BoxArray *boxes; hypre_Box *box; hypre_Box *ghost_box; hypre_IndexRef start; hypre_Index loop_size, stride; HYPRE_IJMatrixSetObjectType(ijmatrix, HYPRE_PARCSR); #ifdef HYPRE_USING_OPENMP HYPRE_IJMatrixSetOMPFlag(ijmatrix, 1); /* Use OpenMP */ #endif if (matrix_type == HYPRE_SSTRUCT || matrix_type == HYPRE_STRUCT) { rowstart = hypre_SStructGridGhstartRank(grid); nrows = hypre_SStructGridGhlocalSize(grid) ; } else /* matrix_type == HYPRE_PARCSR */ { rowstart = hypre_SStructGridStartRank(grid); nrows = hypre_SStructGridLocalSize(grid); } /* set row sizes */ m = 0; max_row_size = 0; ghost_box = hypre_BoxCreate(ndim); row_sizes = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST); hypre_SetIndex(stride, 1); for (part = 0; part < nparts; part++) { nvars = hypre_SStructPGridNVars(pgrids[part]); for (var = 0; var < nvars; var++) { sgrid = hypre_SStructPGridSGrid(pgrids[part], var); stencil = stencils[part][var]; split = hypre_SStructMatrixSplit(matrix, part, var); nnzs = 0; for (entry = 0; entry < hypre_SStructStencilSize(stencil); entry++) { if (split[entry] == -1) { nnzs++; } } #if 0 /* TODO: For now, assume stencil is full/complete */ if (hypre_SStructMatrixSymmetric(matrix)) { nnzs = 2*nnzs - 1; } #endif boxes = hypre_StructGridBoxes(sgrid); hypre_ForBoxI(b, boxes) { box = hypre_BoxArrayBox(boxes, b); hypre_CopyBox(box, ghost_box); if (matrix_type == HYPRE_SSTRUCT || matrix_type == HYPRE_STRUCT) { hypre_BoxGrowByArray(ghost_box, hypre_StructGridNumGhost(sgrid)); } start = hypre_BoxIMin(box); hypre_BoxGetSize(box, loop_size); zypre_BoxLoop1Begin(hypre_SStructMatrixNDim(matrix), loop_size, ghost_box, start, stride, mi); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,mi) HYPRE_SMP_SCHEDULE #endif zypre_BoxLoop1For(mi) { row_sizes[m+mi] = nnzs; } zypre_BoxLoop1End(mi); m += hypre_BoxVolume(ghost_box); } max_row_size = hypre_max(max_row_size, nnzs); if (nvneighbors[part][var]) { max_row_size = hypre_max(max_row_size, hypre_SStructStencilSize(stencil)); } } } hypre_BoxDestroy(ghost_box); /* GEC0902 essentially for each UVentry we figure out how many extra columns * we need to add to the rowsizes */ /* RDF: THREAD? 
*/ for (entry = 0; entry < nUventries; entry++) { mi = iUventries[entry]; m = hypre_SStructUVEntryRank(Uventries[mi]) - rowstart; if ((m > -1) && (m < nrows)) { row_sizes[m] += hypre_SStructUVEntryNUEntries(Uventries[mi]); max_row_size = hypre_max(max_row_size, row_sizes[m]); } } /* ZTODO: Update row_sizes based on neighbor off-part couplings */ HYPRE_IJMatrixSetRowSizes (ijmatrix, (const HYPRE_Int *) row_sizes); hypre_TFree(row_sizes, HYPRE_MEMORY_HOST); hypre_SStructMatrixTmpSize(matrix) = max_row_size; hypre_SStructMatrixTmpRowCoords(matrix) = hypre_CTAlloc(HYPRE_BigInt, max_row_size, HYPRE_MEMORY_HOST); hypre_SStructMatrixTmpColCoords(matrix) = hypre_CTAlloc(HYPRE_BigInt, max_row_size, HYPRE_MEMORY_HOST); hypre_SStructMatrixTmpCoeffs(matrix) = hypre_CTAlloc(HYPRE_Complex, max_row_size, HYPRE_MEMORY_HOST); hypre_SStructMatrixTmpRowCoordsDevice(matrix) = hypre_CTAlloc(HYPRE_BigInt, max_row_size, HYPRE_MEMORY_DEVICE); hypre_SStructMatrixTmpColCoordsDevice(matrix) = hypre_CTAlloc(HYPRE_BigInt, max_row_size, HYPRE_MEMORY_DEVICE); hypre_SStructMatrixTmpCoeffsDevice(matrix) = hypre_CTAlloc(HYPRE_Complex, max_row_size, HYPRE_MEMORY_DEVICE); HYPRE_IJMatrixInitialize(ijmatrix); return hypre_error_flag; } /*-------------------------------------------------------------------------- * (action > 0): add-to values * (action = 0): set values * (action < 0): get values * * 9/09 - AB: modified to use the box manager - here we need to check the * neighbor box manager also *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructUMatrixSetValues( hypre_SStructMatrix *matrix, HYPRE_Int part, hypre_Index index, HYPRE_Int var, HYPRE_Int nentries, HYPRE_Int *entries, HYPRE_Complex *values, HYPRE_Int action ) { HYPRE_Int ndim = hypre_SStructMatrixNDim(matrix); HYPRE_IJMatrix ijmatrix = hypre_SStructMatrixIJMatrix(matrix); hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix); hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph); hypre_SStructGrid *dom_grid = hypre_SStructGraphDomainGrid(graph); hypre_SStructStencil *stencil = hypre_SStructGraphStencil(graph, part, var); HYPRE_Int *vars = hypre_SStructStencilVars(stencil); hypre_Index *shape = hypre_SStructStencilShape(stencil); HYPRE_Int size = hypre_SStructStencilSize(stencil); hypre_IndexRef offset; hypre_Index to_index; hypre_SStructUVEntry *Uventry; hypre_BoxManEntry *boxman_entry; hypre_SStructBoxManInfo *entry_info; HYPRE_BigInt row_coord; HYPRE_BigInt *col_coords; HYPRE_Int ncoeffs; HYPRE_Complex *coeffs; HYPRE_Int i, entry; HYPRE_BigInt Uverank; HYPRE_Int matrix_type = hypre_SStructMatrixObjectType(matrix); HYPRE_Complex *h_values; hypre_SStructGridFindBoxManEntry(grid, part, index, var, &boxman_entry); /* if not local, check neighbors */ if (boxman_entry == NULL) hypre_SStructGridFindNborBoxManEntry(grid, part, index, var, &boxman_entry); if (boxman_entry == NULL) { hypre_error_in_arg(1); hypre_error_in_arg(2); hypre_error_in_arg(3); return hypre_error_flag; } else { hypre_BoxManEntryGetInfo(boxman_entry, (void **) &entry_info); } hypre_SStructBoxManEntryGetGlobalRank(boxman_entry, index, &row_coord, matrix_type); col_coords = hypre_SStructMatrixTmpColCoords(matrix); coeffs = hypre_SStructMatrixTmpCoeffs(matrix); /* RL: copy values to host since the following for-loop is on CPU */ if ( hypre_GetActualMemLocation(HYPRE_MEMORY_DEVICE) != hypre_MEMORY_HOST ) { h_values = hypre_TAlloc(HYPRE_Complex, nentries, HYPRE_MEMORY_HOST); hypre_TMemcpy(h_values, values, HYPRE_Complex, nentries, 
HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); } else { h_values = values; } /* RL: TODO Port it to GPU? */ ncoeffs = 0; for (i = 0; i < nentries; i++) { entry = entries[i]; if (entry < size) { /* stencil entries */ offset = shape[entry]; hypre_AddIndexes(index, offset, ndim, to_index); hypre_SStructGridFindBoxManEntry(dom_grid, part, to_index, vars[entry], &boxman_entry); /* if not local, check neighbors */ if (boxman_entry == NULL) { hypre_SStructGridFindNborBoxManEntry(dom_grid, part, to_index, vars[entry], &boxman_entry); } if (boxman_entry != NULL) { hypre_SStructBoxManEntryGetGlobalRank(boxman_entry, to_index, &col_coords[ncoeffs],matrix_type); coeffs[ncoeffs] = h_values[i]; ncoeffs++; } } else { /* non-stencil entries */ entry -= size; hypre_SStructGraphGetUVEntryRank(graph, part, var, index, &Uverank); if (Uverank > -1) { Uventry = hypre_SStructGraphUVEntry(graph, Uverank); col_coords[ncoeffs] = hypre_SStructUVEntryToRank(Uventry, entry); coeffs[ncoeffs] = h_values[i]; ncoeffs++; } } } #if defined(HYPRE_USING_CUDA) HYPRE_BigInt *d_row_coords = hypre_SStructMatrixTmpRowCoordsDevice(matrix); HYPRE_BigInt *d_col_coords = hypre_SStructMatrixTmpColCoordsDevice(matrix); HYPRE_Complex *d_coeffs = hypre_SStructMatrixTmpCoeffsDevice(matrix); if ( hypre_GetExecPolicy1(hypre_IJMatrixMemoryLocation(ijmatrix)) == HYPRE_EXEC_DEVICE ) { hypreDevice_BigIntFilln(d_row_coords, ncoeffs, row_coord); hypre_TMemcpy(d_col_coords, col_coords, HYPRE_BigInt, ncoeffs, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); hypre_TMemcpy(d_coeffs, coeffs, HYPRE_Complex, ncoeffs, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); if (action > 0) { HYPRE_IJMatrixAddToValues(ijmatrix, ncoeffs, NULL, d_row_coords, (const HYPRE_BigInt *) d_col_coords, (const HYPRE_Complex *) d_coeffs); } else if (action > -1) { HYPRE_IJMatrixSetValues(ijmatrix, ncoeffs, NULL, d_row_coords, (const HYPRE_BigInt *) d_col_coords, (const HYPRE_Complex *) d_coeffs); } else { // RL:TODO HYPRE_IJMatrixGetValues(ijmatrix, 1, &ncoeffs, &row_coord, col_coords, values); } } else #endif { if (action > 0) { HYPRE_IJMatrixAddToValues(ijmatrix, 1, &ncoeffs, &row_coord, (const HYPRE_BigInt *) col_coords, (const HYPRE_Complex *) coeffs); } else if (action > -1) { HYPRE_IJMatrixSetValues(ijmatrix, 1, &ncoeffs, &row_coord, (const HYPRE_BigInt *) col_coords, (const HYPRE_Complex *) coeffs); } else { HYPRE_IJMatrixGetValues(ijmatrix, 1, &ncoeffs, &row_coord, col_coords, values); } } if (h_values != values) { hypre_TFree(h_values, HYPRE_MEMORY_HOST); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * Note: Entries must all be of type stencil or non-stencil, but not both. * * (action > 0): add-to values * (action = 0): set values * (action < 0): get values * * 9/09 - AB: modified to use the box manager- here we need to check the * neighbor box manager also * * To illustrate what is computed below before calling IJSetValues2(), consider * the following example of a 5-pt stencil (c,w,e,s,n) on a 3x2 grid (the 'x' in * arrays 'cols' and 'ijvalues' indicates "no data"): * * nrows = 6 * ncols = 3 4 3 3 4 3 * rows = 0 1 2 3 4 5 * row_indexes = 0 5 10 15 20 25 * cols = . . . x x . . . . x . . . x x . . . x x . . . . x . . . x x * ijvalues = . . . x x . . . . x . . . x x . . . x x . . . . x . . . 
x x * entry = c e n c w e n c w n c e s c w e s c w s *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructUMatrixSetBoxValues( hypre_SStructMatrix *matrix, HYPRE_Int part, hypre_Box *set_box, HYPRE_Int var, HYPRE_Int nentries, HYPRE_Int *entries, hypre_Box *value_box, HYPRE_Complex *values, HYPRE_Int action ) { HYPRE_Int ndim = hypre_SStructMatrixNDim(matrix); HYPRE_IJMatrix ijmatrix = hypre_SStructMatrixIJMatrix(matrix); hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix); hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph); hypre_SStructGrid *dom_grid = hypre_SStructGraphDomainGrid(graph); hypre_SStructStencil *stencil = hypre_SStructGraphStencil(graph, part, var); HYPRE_Int *vars = hypre_SStructStencilVars(stencil); hypre_Index *shape = hypre_SStructStencilShape(stencil); HYPRE_Int size = hypre_SStructStencilSize(stencil); hypre_IndexRef offset; hypre_BoxManEntry **boxman_entries; HYPRE_Int nboxman_entries; hypre_BoxManEntry **boxman_to_entries; HYPRE_Int nboxman_to_entries; HYPRE_Int nrows; HYPRE_Int *ncols, *row_indexes;; HYPRE_BigInt *rows, *cols; HYPRE_Complex *ijvalues; hypre_Box *box; hypre_Box *to_box; hypre_Box *map_box; hypre_Box *int_box; hypre_Index index, stride, loop_size; hypre_IndexRef start; hypre_Index rs, cs; HYPRE_BigInt row_base, col_base; HYPRE_Int ei, entry, ii, jj; HYPRE_Int matrix_type = hypre_SStructMatrixObjectType(matrix); box = hypre_BoxCreate(ndim); /*------------------------------------------ * all stencil entries *------------------------------------------*/ if (entries[0] < size) { to_box = hypre_BoxCreate(ndim); map_box = hypre_BoxCreate(ndim); int_box = hypre_BoxCreate(ndim); nrows = hypre_BoxVolume(set_box); ncols = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_DEVICE); rows = hypre_CTAlloc(HYPRE_BigInt, nrows, HYPRE_MEMORY_DEVICE); row_indexes = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_DEVICE); cols = hypre_CTAlloc(HYPRE_BigInt, nrows*nentries, HYPRE_MEMORY_DEVICE); ijvalues = hypre_CTAlloc(HYPRE_Complex, nrows*nentries, HYPRE_MEMORY_DEVICE); hypre_SetIndex(stride, 1); hypre_SStructGridIntersect(grid, part, var, set_box, -1, &boxman_entries, &nboxman_entries); for (ii = 0; ii < nboxman_entries; ii++) { hypre_SStructBoxManEntryGetStrides(boxman_entries[ii], rs, matrix_type); hypre_CopyBox(set_box, box); hypre_BoxManEntryGetExtents(boxman_entries[ii], hypre_BoxIMin(map_box), hypre_BoxIMax(map_box)); hypre_IntersectBoxes(box, map_box, int_box); hypre_CopyBox(int_box, box); /* For each index in 'box', compute a row of length <= nentries and * insert it into an nentries-length segment of 'cols' and 'ijvalues'. * This may result in gaps, but IJSetValues2() is designed for that. 
*/ nrows = hypre_BoxVolume(box); #undef DEVICE_VAR #define DEVICE_VAR is_device_ptr(ncols,row_indexes) hypre_LoopBegin(nrows, i) { ncols[i] = 0; row_indexes[i] = i*nentries; } hypre_LoopEnd() #undef DEVICE_VAR #define DEVICE_VAR for (ei = 0; ei < nentries; ei++) { entry = entries[ei]; hypre_CopyBox(box, to_box); offset = shape[entry]; hypre_BoxShiftPos(to_box, offset); hypre_SStructGridIntersect(dom_grid, part, vars[entry], to_box, -1, &boxman_to_entries, &nboxman_to_entries); for (jj = 0; jj < nboxman_to_entries; jj++) { hypre_SStructBoxManEntryGetStrides(boxman_to_entries[jj], cs, matrix_type); hypre_BoxManEntryGetExtents(boxman_to_entries[jj], hypre_BoxIMin(map_box), hypre_BoxIMax(map_box)); hypre_IntersectBoxes(to_box, map_box, int_box); hypre_CopyIndex(hypre_BoxIMin(int_box), index); hypre_SStructBoxManEntryGetGlobalRank(boxman_to_entries[jj], index, &col_base, matrix_type); hypre_BoxShiftNeg(int_box, offset); hypre_CopyIndex(hypre_BoxIMin(int_box), index); hypre_SStructBoxManEntryGetGlobalRank(boxman_entries[ii], index, &row_base, matrix_type); start = hypre_BoxIMin(int_box); hypre_BoxGetSize(int_box, loop_size); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP) hypre_assert(ndim <= 3); HYPRE_Int rs_0, rs_1, rs_2; HYPRE_Int cs_0, cs_1, cs_2; if (ndim > 0) { rs_0 = rs[0]; cs_0 = cs[0]; } if (ndim > 1) { rs_1 = rs[1]; cs_1 = cs[1]; } if (ndim > 2) { rs_2 = rs[2]; cs_2 = cs[2]; } #endif #undef DEVICE_VAR #define DEVICE_VAR is_device_ptr(ncols,rows,cols,ijvalues,values) hypre_BoxLoop2Begin(ndim, loop_size, box, start, stride, mi, value_box, start, stride, vi); { hypre_Index index; HYPRE_Int ci; hypre_BoxLoopGetIndex(index); ci = mi*nentries + ncols[mi]; rows[mi] = row_base; cols[ci] = col_base; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP) if (ndim > 0) { rows[mi] += index[0] * rs_0; cols[ci] += index[0] * cs_0; } if (ndim > 1) { rows[mi] += index[1] * rs_1; cols[ci] += index[1] * cs_1; } if (ndim > 2) { rows[mi] += index[2] * rs_2; cols[ci] += index[2] * cs_2; } #else HYPRE_Int d; for (d = 0; d < ndim; d++) { rows[mi] += index[d]*rs[d]; cols[ci] += index[d]*cs[d]; } #endif ijvalues[ci] = values[ei + vi*nentries]; ncols[mi]++; } hypre_BoxLoop2End(mi, vi); #undef DEVICE_VAR #define DEVICE_VAR } /* end loop through boxman to entries */ hypre_TFree(boxman_to_entries, HYPRE_MEMORY_HOST); } /* end of ei nentries loop */ if (action > 0) { HYPRE_IJMatrixAddToValues2(ijmatrix, nrows, ncols, (const HYPRE_BigInt *) rows, (const HYPRE_Int *) row_indexes, (const HYPRE_BigInt *) cols, (const HYPRE_Complex *) ijvalues); } else if (action > -1) { HYPRE_IJMatrixSetValues2(ijmatrix, nrows, ncols, (const HYPRE_BigInt *) rows, (const HYPRE_Int *) row_indexes, (const HYPRE_BigInt *) cols, (const HYPRE_Complex *) ijvalues); } else { HYPRE_IJMatrixGetValues(ijmatrix, nrows, ncols, rows, cols, values); } } /* end loop through boxman entries */ hypre_TFree(boxman_entries, HYPRE_MEMORY_HOST); hypre_TFree(ncols, HYPRE_MEMORY_DEVICE); hypre_TFree(rows, HYPRE_MEMORY_DEVICE); hypre_TFree(row_indexes, HYPRE_MEMORY_DEVICE); hypre_TFree(cols, HYPRE_MEMORY_DEVICE); hypre_TFree(ijvalues, HYPRE_MEMORY_DEVICE); hypre_BoxDestroy(to_box); hypre_BoxDestroy(map_box); hypre_BoxDestroy(int_box); } /*------------------------------------------ * non-stencil entries *------------------------------------------*/ else { /* RDF: THREAD (Check safety on UMatrixSetValues call) */ hypre_BoxGetSize(set_box, loop_size); hypre_SerialBoxLoop0Begin(ndim, loop_size); { zypre_BoxLoopGetIndex(index); 
hypre_AddIndexes(index, hypre_BoxIMin(set_box), ndim, index); hypre_SStructUMatrixSetValues(matrix, part, index, var, nentries, entries, values, action); values += nentries; } hypre_SerialBoxLoop0End(); } hypre_BoxDestroy(box); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructUMatrixAssemble( hypre_SStructMatrix *matrix ) { HYPRE_IJMatrix ijmatrix = hypre_SStructMatrixIJMatrix(matrix); HYPRE_IJMatrixAssemble(ijmatrix); HYPRE_IJMatrixGetObject( ijmatrix, (void **) &hypre_SStructMatrixParCSRMatrix(matrix)); return hypre_error_flag; } /*========================================================================== * SStructMatrix routines *==========================================================================*/ /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructMatrixRef( hypre_SStructMatrix *matrix, hypre_SStructMatrix **matrix_ref ) { hypre_SStructMatrixRefCount(matrix) ++; *matrix_ref = matrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructMatrixSplitEntries( hypre_SStructMatrix *matrix, HYPRE_Int part, HYPRE_Int var, HYPRE_Int nentries, HYPRE_Int *entries, HYPRE_Int *nSentries_ptr, HYPRE_Int **Sentries_ptr, HYPRE_Int *nUentries_ptr, HYPRE_Int **Uentries_ptr ) { hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix); HYPRE_Int *split = hypre_SStructMatrixSplit(matrix, part, var); hypre_SStructStencil *stencil = hypre_SStructGraphStencil(graph, part, var); HYPRE_Int entry; HYPRE_Int i; HYPRE_Int nSentries = 0; HYPRE_Int *Sentries = hypre_SStructMatrixSEntries(matrix); HYPRE_Int nUentries = 0; HYPRE_Int *Uentries = hypre_SStructMatrixUEntries(matrix); for (i = 0; i < nentries; i++) { entry = entries[i]; if (entry < hypre_SStructStencilSize(stencil)) { /* stencil entries */ if (split[entry] > -1) { Sentries[nSentries] = split[entry]; nSentries++; } else { Uentries[nUentries] = entry; nUentries++; } } else { /* non-stencil entries */ Uentries[nUentries] = entry; nUentries++; } } *nSentries_ptr = nSentries; *Sentries_ptr = Sentries; *nUentries_ptr = nUentries; *Uentries_ptr = Uentries; return hypre_error_flag; } /*-------------------------------------------------------------------------- * (action > 0): add-to values * (action = 0): set values * (action < 0): get values * (action =-2): get values and zero out *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructMatrixSetValues( HYPRE_SStructMatrix matrix, HYPRE_Int part, HYPRE_Int *index, HYPRE_Int var, HYPRE_Int nentries, HYPRE_Int *entries, HYPRE_Complex *values, HYPRE_Int action ) { HYPRE_Int ndim = hypre_SStructMatrixNDim(matrix); hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix); hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph); HYPRE_Int **nvneighbors = hypre_SStructGridNVNeighbors(grid); HYPRE_Int *Sentries; HYPRE_Int *Uentries; HYPRE_Int nSentries; HYPRE_Int nUentries; hypre_SStructPMatrix *pmatrix; hypre_Index cindex; hypre_SStructMatrixSplitEntries(matrix, part, var, nentries, entries, &nSentries, &Sentries, &nUentries, &Uentries); hypre_CopyToCleanIndex(index, ndim, cindex); /* S-matrix */ if (nSentries > 0) { pmatrix = 
hypre_SStructMatrixPMatrix(matrix, part); hypre_SStructPMatrixSetValues(pmatrix, cindex, var, nSentries, Sentries, values, action); /* put inter-part couplings in UMatrix and zero them out in PMatrix * (possibly in ghost zones) */ if (nvneighbors[part][var] > 0) { hypre_Box *set_box; HYPRE_Int d; /* This creates boxes with zeroed-out extents */ set_box = hypre_BoxCreate(ndim); for (d = 0; d < ndim; d++) { hypre_BoxIMinD(set_box, d) = cindex[d]; hypre_BoxIMaxD(set_box, d) = cindex[d]; } hypre_SStructMatrixSetInterPartValues(matrix, part, set_box, var, nSentries, entries, set_box, values, action); hypre_BoxDestroy(set_box); } } /* U-matrix */ if (nUentries > 0) { hypre_SStructUMatrixSetValues(matrix, part, cindex, var, nUentries, Uentries, values, action); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * (action > 0): add-to values * (action = 0): set values * (action < 0): get values * (action =-2): get values and zero out *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructMatrixSetBoxValues( HYPRE_SStructMatrix matrix, HYPRE_Int part, hypre_Box *set_box, HYPRE_Int var, HYPRE_Int nentries, HYPRE_Int *entries, hypre_Box *value_box, HYPRE_Complex *values, HYPRE_Int action ) { hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix); hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph); HYPRE_Int **nvneighbors = hypre_SStructGridNVNeighbors(grid); HYPRE_Int *Sentries; HYPRE_Int *Uentries; HYPRE_Int nSentries; HYPRE_Int nUentries; hypre_SStructPMatrix *pmatrix; hypre_SStructMatrixSplitEntries(matrix, part, var, nentries, entries, &nSentries, &Sentries, &nUentries, &Uentries); /* S-matrix */ if (nSentries > 0) { pmatrix = hypre_SStructMatrixPMatrix(matrix, part); hypre_SStructPMatrixSetBoxValues(pmatrix, set_box, var, nSentries, Sentries, value_box, values, action); /* put inter-part couplings in UMatrix and zero them out in PMatrix * (possibly in ghost zones) */ if (nvneighbors[part][var] > 0) { hypre_SStructMatrixSetInterPartValues(matrix, part, set_box, var, nSentries, entries, value_box, values, action); } } /* U-matrix */ if (nUentries > 0) { hypre_SStructUMatrixSetBoxValues(matrix, part, set_box, var, nUentries, Uentries, value_box, values, action); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * Put inter-part couplings in UMatrix and zero them out in PMatrix (possibly in * ghost zones). Assumes that all entries are stencil entries. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructMatrixSetInterPartValues( HYPRE_SStructMatrix matrix, HYPRE_Int part, hypre_Box *set_box, HYPRE_Int var, HYPRE_Int nentries, HYPRE_Int *entries, hypre_Box *value_box, HYPRE_Complex *values, HYPRE_Int action ) { HYPRE_Int ndim = hypre_SStructMatrixNDim(matrix); hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix); hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph); hypre_SStructPMatrix *pmatrix; hypre_SStructPGrid *pgrid; hypre_SStructStencil *stencil; hypre_Index *shape; HYPRE_Int *smap; HYPRE_Int *vars, frvartype, tovartype; hypre_StructMatrix *smatrix; hypre_Box *box, *ibox0, *ibox1, *tobox, *frbox; hypre_Index stride, loop_size; hypre_IndexRef offset, start; hypre_BoxManEntry **frentries, **toentries; hypre_SStructBoxManInfo *frinfo, *toinfo; HYPRE_Complex *tvalues = NULL; HYPRE_Int tvalues_size = 0; HYPRE_Int nfrentries, ntoentries, frpart, topart; HYPRE_Int entry, sentry, ei, fri, toi; pmatrix = hypre_SStructMatrixPMatrix(matrix, part); pgrid = hypre_SStructPMatrixPGrid(pmatrix); frvartype = hypre_SStructPGridVarType(pgrid, var); box = hypre_BoxCreate(ndim); ibox0 = hypre_BoxCreate(ndim); ibox1 = hypre_BoxCreate(ndim); tobox = hypre_BoxCreate(ndim); frbox = hypre_BoxCreate(ndim); stencil = hypre_SStructPMatrixStencil(pmatrix, var); smap = hypre_SStructPMatrixSMap(pmatrix, var); shape = hypre_SStructStencilShape(stencil); vars = hypre_SStructStencilVars(stencil); hypre_SetIndex(stride, 1); for (ei = 0; ei < nentries; ei++) { entry = entries[ei]; sentry = smap[entry]; offset = shape[entry]; smatrix = hypre_SStructPMatrixSMatrix(pmatrix, var, vars[entry]); tovartype = hypre_SStructPGridVarType(pgrid, vars[entry]); /* shift box in the stencil offset direction */ hypre_CopyBox(set_box, box); hypre_AddIndexes(hypre_BoxIMin(box), offset, ndim, hypre_BoxIMin(box)); hypre_AddIndexes(hypre_BoxIMax(box), offset, ndim, hypre_BoxIMax(box)); /* get "to" entries */ hypre_SStructGridIntersect(grid, part, vars[entry], box, -1, &toentries, &ntoentries); for (toi = 0; toi < ntoentries; toi++) { hypre_BoxManEntryGetExtents( toentries[toi], hypre_BoxIMin(tobox), hypre_BoxIMax(tobox)); hypre_IntersectBoxes(box, tobox, ibox0); if (hypre_BoxVolume(ibox0)) { hypre_SStructBoxManEntryGetPart(toentries[toi], part, &topart); /* shift ibox0 back */ hypre_SubtractIndexes(hypre_BoxIMin(ibox0), offset, ndim, hypre_BoxIMin(ibox0)); hypre_SubtractIndexes(hypre_BoxIMax(ibox0), offset, ndim, hypre_BoxIMax(ibox0)); /* get "from" entries */ hypre_SStructGridIntersect(grid, part, var, ibox0, -1, &frentries, &nfrentries); for (fri = 0; fri < nfrentries; fri++) { /* don't set couplings within the same part unless possibly for * cell data (to simplify periodic conditions for users) */ hypre_SStructBoxManEntryGetPart(frentries[fri], part, &frpart); if (topart == frpart) { if ( (frvartype != HYPRE_SSTRUCT_VARIABLE_CELL) || (tovartype != HYPRE_SSTRUCT_VARIABLE_CELL) ) { continue; } hypre_BoxManEntryGetInfo(frentries[fri], (void **) &frinfo); hypre_BoxManEntryGetInfo(toentries[toi], (void **) &toinfo); if ( hypre_SStructBoxManInfoType(frinfo) == hypre_SStructBoxManInfoType(toinfo) ) { continue; } } hypre_BoxManEntryGetExtents( frentries[fri], hypre_BoxIMin(frbox), hypre_BoxIMax(frbox)); hypre_IntersectBoxes(ibox0, frbox, ibox1); if (hypre_BoxVolume(ibox1)) { HYPRE_Int tvalues_new_size = hypre_BoxVolume(ibox1); tvalues = hypre_TReAlloc_v2(tvalues, HYPRE_Complex, tvalues_size, HYPRE_Complex, tvalues_new_size, 
HYPRE_MEMORY_DEVICE); tvalues_size = tvalues_new_size; if (action >= 0) { /* set or add */ /* copy values into tvalues */ start = hypre_BoxIMin(ibox1); hypre_BoxGetSize(ibox1, loop_size); #undef DEVICE_VAR #define DEVICE_VAR is_device_ptr(tvalues,values) hypre_BoxLoop2Begin(ndim, loop_size, ibox1, start, stride, mi, value_box, start, stride, vi); { tvalues[mi] = values[ei + vi*nentries]; } hypre_BoxLoop2End(mi, vi); #undef DEVICE_VAR #define DEVICE_VAR /* put values into UMatrix */ hypre_SStructUMatrixSetBoxValues( matrix, part, ibox1, var, 1, &entry, ibox1, tvalues, action); /* zero out values in PMatrix (possibly in ghost) */ hypre_StructMatrixClearBoxValues( smatrix, ibox1, 1, &sentry, -1, 1); } else { /* get */ /* get values from UMatrix */ hypre_SStructUMatrixSetBoxValues( matrix, part, ibox1, var, 1, &entry, ibox1, tvalues, action); /* copy tvalues into values */ start = hypre_BoxIMin(ibox1); hypre_BoxGetSize(ibox1, loop_size); #undef DEVICE_VAR #define DEVICE_VAR is_device_ptr(tvalues,values) hypre_BoxLoop2Begin(ndim, loop_size, ibox1, start, stride, mi, value_box, start, stride, vi); { values[ei + vi*nentries] = tvalues[mi]; } hypre_BoxLoop2End(mi, vi); #undef DEVICE_VAR #define DEVICE_VAR } /* end if action */ } /* end if nonzero ibox1 */ } /* end of "from" boxman entries loop */ hypre_TFree(frentries, HYPRE_MEMORY_HOST); } /* end if nonzero ibox0 */ } /* end of "to" boxman entries loop */ hypre_TFree(toentries, HYPRE_MEMORY_HOST); } /* end of entries loop */ hypre_BoxDestroy(box); hypre_BoxDestroy(ibox0); hypre_BoxDestroy(ibox1); hypre_BoxDestroy(tobox); hypre_BoxDestroy(frbox); hypre_TFree(tvalues, HYPRE_MEMORY_DEVICE); return hypre_error_flag; }
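Editor's note: the routines above are normally reached through hypre's public SStruct interface; HYPRE_SStructMatrixSetValues and HYPRE_SStructMatrixAddToValues forward to hypre_SStructMatrixSetValues with the action codes documented in this file (action = 0 to set, action > 0 to add). The following is a minimal, self-contained sketch of that driver path for a single-part, single-variable, 5-point problem. The grid extents, stencil offsets, and coefficient values are invented for illustration, and it assumes a default hypre build in which HYPRE_Int is int and HYPRE_Complex is double.

/* sstruct_matrix_example.c -- illustrative sketch only, not part of hypre */
#include <mpi.h>
#include "HYPRE_sstruct_mv.h"

int main(int argc, char *argv[])
{
   HYPRE_SStructGrid     grid;
   HYPRE_SStructStencil  stencil;
   HYPRE_SStructGraph    graph;
   HYPRE_SStructMatrix   A;

   int ndim = 2, nparts = 1, part = 0, var = 0, s;
   int ilower[2] = {0, 0}, iupper[2] = {3, 3};
   HYPRE_SStructVariable vartypes[1] = {HYPRE_SSTRUCT_VARIABLE_CELL};
   int offsets[5][2] = {{0,0}, {-1,0}, {1,0}, {0,-1}, {0,1}};

   MPI_Init(&argc, &argv);

   /* grid: one part, one cell-centered variable on a 4x4 box */
   HYPRE_SStructGridCreate(MPI_COMM_WORLD, ndim, nparts, &grid);
   HYPRE_SStructGridSetExtents(grid, part, ilower, iupper);
   HYPRE_SStructGridSetVariables(grid, part, 1, vartypes);
   HYPRE_SStructGridAssemble(grid);

   /* 5-point stencil, all entries coupling var to itself */
   HYPRE_SStructStencilCreate(ndim, 5, &stencil);
   for (s = 0; s < 5; s++)
   {
      HYPRE_SStructStencilSetEntry(stencil, s, offsets[s], var);
   }

   /* graph attaches the stencil to (part, var) */
   HYPRE_SStructGraphCreate(MPI_COMM_WORLD, grid, &graph);
   HYPRE_SStructGraphSetStencil(graph, part, var, stencil);
   HYPRE_SStructGraphAssemble(graph);

   /* matrix: Set one row of coefficients, AddTo its diagonal, then Assemble */
   HYPRE_SStructMatrixCreate(MPI_COMM_WORLD, graph, &A);
   HYPRE_SStructMatrixSetObjectType(A, HYPRE_SSTRUCT);
   HYPRE_SStructMatrixInitialize(A);
   {
      int    index[2]   = {1, 1};
      int    entries[5] = {0, 1, 2, 3, 4};
      double values[5]  = {4.0, -1.0, -1.0, -1.0, -1.0};
      double one        = 1.0;

      HYPRE_SStructMatrixSetValues(A, part, index, var, 5, entries, values);
      HYPRE_SStructMatrixAddToValues(A, part, index, var, 1, entries, &one);
   }
   HYPRE_SStructMatrixAssemble(A);
   HYPRE_SStructMatrixPrint("sstruct.A", A, 0);

   HYPRE_SStructMatrixDestroy(A);
   HYPRE_SStructGraphDestroy(graph);
   HYPRE_SStructStencilDestroy(stencil);
   HYPRE_SStructGridDestroy(grid);
   MPI_Finalize();
   return 0;
}

In this sketch the Set and AddTo calls land in hypre_SStructMatrixSetValues above (with action 0 and action > 0 respectively), and hypre_SStructMatrixSplitEntries decides which entries go to the structured PMatrix and which to the unstructured UMatrix.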
Triangular_CSC.h
// // Created by kazem on 7/18/17. // #ifndef TRIANGOPENMP_TRIANGULAR_CSC_H #define TRIANGOPENMP_TRIANGULAR_CSC_H #include <immintrin.h> #include "../common/Reach.h" namespace nasoq { /* ****** Serial implementation */ int lsolve(int n, int *Lp, int *Li, double *Lx, double *x) { int p, j; if (!Lp || !Li || !x) return (0); /* check inputs */ for (j = 0; j < n; j++) { x[j] /= Lx[Lp[j]]; for (p = Lp[j] + 1; p < Lp[j + 1]; p++) { x[Li[p]] -= Lx[p] * x[j]; } } return (1); } /* * L^T x = b */ int ltsolve(int n, int *Lp, int *Li, double *Lx, double *x) { int p, j; if (!Lp || !Li || !x) return (0); /* check inputs */ for (j = n - 1; j >= 0; j--) { for (p = Lp[j] + 1; p < Lp[j + 1]; p++) { x[j] -= Lx[p] * x[Li[p]]; } x[j] /= Lx[Lp[j]]; } return (1); } /* * Counting the number of FLOPS in triangular solve */ unsigned long flopCoutLSolve(int n, int *Lp, int *Li, double *Lx, double *x) { int p, j; unsigned long flopCount = 0; if (!Lp || !Li || !x) return (0); /* check inputs */ for (j = 0; j < n; j++) { x[j] /= Lx[Lp[j]]; flopCount++; for (p = Lp[j] + 1; p < Lp[j + 1]; p++) { x[Li[p]] -= Lx[p] * x[j]; flopCount += 2; } } return (flopCount); } /* ****** Parallel */ int lsolvePar(int n, int *Lp, int *Li, double *Lx, double *x, int levels, int *levelPtr, int *levelSet, int chunk) { if (!Lp || !Li || !x) return (0); /* check inputs */ for (int l = 0; l < levels; ++l) { int li = 0; #pragma omp parallel for \ default(shared) private(li) \ schedule(static) for (li = levelPtr[l]; li < levelPtr[l + 1]; ++li) { int j = levelSet[li]; x[j] /= Lx[Lp[j]]; for (int p = Lp[j] + 1; p < Lp[j + 1]; p++) { double tmp = Lx[p] * x[j]; int idx = Li[p]; #pragma omp atomic x[idx] -= tmp; } } } return (1); } /* ****** Parallel H2 */ int lsolveParH2(int n, int *Lp, int *Li, double *Lx, double *x, int levels, int *levelPtr, int *levelSet, int parts, int *parPtr, int *partition, int chunk) { if (!Lp || !Li || !x) return (0); /* check inputs */ for (int i1 = 0; i1 < levels; ++i1) { #pragma omp parallel //shared(lValues)//private(map, contribs) { #pragma omp for schedule(static) for (int j1 = levelPtr[i1]; j1 < levelPtr[i1 + 1]; ++j1) { for (int k1 = parPtr[j1]; k1 < parPtr[j1 + 1]; ++k1) { int j = partition[k1]; x[j] /= Lx[Lp[j]]; // #pragma omp critical for (int p = Lp[j] + 1; p < Lp[j + 1]; p++) { double tmp = Lx[p] * x[j]; int idx = Li[p]; #pragma omp atomic x[idx] -= tmp; } } } } } return (1); } /* * */ int lsolvePar2(int n, int *Lp, int *Li, double *Lx, double *x) { int p, j; if (!Lp || !Li || !x) return (0); /* check inputs */ for (j = 0; j < n; j++) { x[j] /= Lx[Lp[j]]; //#pragma omp parallel for for (p = Lp[j] + 1; p < Lp[j + 1]; p++) { x[Li[p]] -= Lx[p] * x[j]; } } return (1); } /* * Vectorized implementation */ #if 0 typedef union { __m256d v; double d[4]; } v4df_t; int lsolveVectorize(int n, int* Lp, int* Li, const double* Lx, double *x) { double xx; v4df_t reg_Lx; v4df_t reg_x; v4df_t result0, result1, result2, result3; int mod=0; #if 0 for (int k = st ; k < bd1inReach ; k++) { j = reach[k]; xx=x [j]; xx /= Lx [Lp [j]] ; for (p = Lp [j]+1 ; p < Lp [j+1] ; p++) { x [Li [p]] -= Lx [p] * xx; } x[j]=xx; } #endif #if 0 for (int k = bd1 ; k < bd2 ; k++) { xx = x[k]; xx /= Lx[Lp [k]]; reg_x.v = _mm256_set1_pd(xx); mod = (Lp [k+1] - Lp [k] - 1) % 4; for (int i1 = Lp [k] + 1; i1 < Lp [k+1] - mod; i1 += 4) { reg_Lx.v = _mm256_load_pd((double *) (Lx + i1)); result0.v = _mm256_mul_pd(reg_Lx.v, reg_x.v); x[Li[i1]] -= result0.d[0]; x[Li[i1 + 1]] -= result0.d[1]; x[Li[i1 + 2]] -= result0.d[2]; x[Li[i1 + 3]] -= result0.d[3]; } for 
(int i1 = Lp [k+1] - mod; i1 < Lp [k+1]; ++i1) { x[Li[i1]] -= Lx[i1] * xx; } x[k] = xx; } #endif #if 1 for (int k = 0 ; k < n ; k++) { xx = x[k]; xx /= Lx[Lp [k]]; if(xx != 0){ reg_x.v = _mm256_set1_pd(xx); mod = (Lp [k+1] - Lp [k] - 1) % 16; for (int i1 = Lp [k] + 1; i1 < Lp [k+1] - mod; i1 += 16) { reg_Lx.v = _mm256_load_pd((double *) (Lx + i1)); result0.v = _mm256_mul_pd(reg_Lx.v, reg_x.v); reg_Lx.v = _mm256_load_pd((double *) (Lx + i1+4)); result1.v = _mm256_mul_pd(reg_Lx.v, reg_x.v); reg_Lx.v = _mm256_load_pd((double *) (Lx + i1+8)); result2.v = _mm256_mul_pd(reg_Lx.v, reg_x.v); reg_Lx.v = _mm256_load_pd((double *) (Lx + i1+12)); result3.v = _mm256_mul_pd(reg_Lx.v, reg_x.v); x[Li[i1]] -= result0.d[0]; x[Li[i1 + 1]] -= result0.d[1]; x[Li[i1 + 2]] -= result0.d[2]; x[Li[i1 + 3]] -= result0.d[3]; x[Li[i1 + 4]] -= result1.d[0]; x[Li[i1 + 5]] -= result1.d[1]; x[Li[i1 + 6]] -= result1.d[2]; x[Li[i1 + 7]] -= result1.d[3]; x[Li[i1 + 8]] -= result2.d[0]; x[Li[i1 + 9]] -= result2.d[1]; x[Li[i1 + 10]] -= result2.d[2]; x[Li[i1 + 11]] -= result2.d[3]; x[Li[i1 + 12]] -= result3.d[0]; x[Li[i1 + 13]] -= result3.d[1]; x[Li[i1 + 14]] -= result3.d[2]; x[Li[i1 + 15]] -= result3.d[3]; } for (int i1 = Lp [k+1] - mod; i1 < Lp [k+1]; ++i1) { x[Li[i1]] -= Lx[i1] * xx; } x[k] = xx; } } #endif } #endif /* * Pruned */ int lsolve_reach_dec(int n, int *Gp, int *Gi, double *Gx, int *Bp, int *Bi, double *Bx, int k, int *xi, double *x, const int *pinv, double &symDuration) { int j, p, px, top; std::chrono::time_point<std::chrono::system_clock> start, end; top = reach(n, Gp, Gi, Bp, Bi, k, xi, pinv); start = std::chrono::system_clock::now(); for (px = top; px < n; px++) { j = xi[px]; x[j] /= Gx[(Gp[j])]; p = Gp[j] + 1; for (; p < Gp[j + 1]; p++) { x[Gi[p]] -= Gx[p] * x[j]; } } end = std::chrono::system_clock::now(); std::chrono::duration<double> tmp = end - start; symDuration = tmp.count(); return (top); } /* * only for motive example */ void lSolveSympiler(int n, int *Lp, int *Li, const double *Lx, double *x, int *reachSet, int reachSetSize) { int p, px, j; x[0] /= Lx[0]; // Peel col 0 double x_Li_1 = Lx[1] * x[0]; double x_Li_2 = Lx[2] * x[0]; x[*(Li + 1)] -= x_Li_1; x[*(Li + 2)] -= x_Li_2; for (px = 1; px < 3; px++) { j = reachSet[px]; x[j] /= Lx[Lp[j]]; for (p = Lp[j] + 1; p < Lp[j + 1]; p++) x[Li[p]] -= Lx[p] * x[j]; } x[7] /= Lx[20]; // Peel col 7 double x_Li_21 = Lx[21] * x[7]; double x_Li_22 = Lx[22] * x[7]; x[*(Li + 21)] -= x_Li_21; x[*(Li + 22)] -= x_Li_22; for (px = 4; px < reachSetSize; px++) { j = reachSet[px]; x[j] /= Lx[Lp[j]]; for (p = Lp[j] + 1; p < Lp[j + 1]; p++) x[Li[p]] -= Lx[p] * x[j]; } } } #endif //TRIANGOPENMP_TRIANGULAR_CSC_H
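Editor's note: a minimal standalone sketch of the CSC lower-triangular forward solve that lsolve() above implements, restated here without the Reach.h dependency so it can be compiled and checked on its own. The 3x3 matrix and right-hand side are made up for illustration; the loop structure (divide by the diagonal, which is stored first in each column, then scatter the updates down the column) matches the serial kernel above.

/* lsolve_csc_demo.c -- illustrative sketch only, not part of nasoq */
#include <stdio.h>

/* Same pattern as nasoq::lsolve: L is lower triangular in CSC with the
 * diagonal entry stored first in every column; x holds b on entry and the
 * solution of L x = b on exit. */
static int lsolve_csc(int n, const int *Lp, const int *Li,
                      const double *Lx, double *x)
{
   int j, p;
   if (!Lp || !Li || !x) return 0;            /* check inputs */
   for (j = 0; j < n; j++)
   {
      x[j] /= Lx[Lp[j]];                      /* divide by diagonal of column j */
      for (p = Lp[j] + 1; p < Lp[j + 1]; p++)
      {
         x[Li[p]] -= Lx[p] * x[j];            /* update rows below j */
      }
   }
   return 1;
}

int main(void)
{
   /* L = [2 0 0; 1 3 0; 4 5 6] in compressed sparse column form */
   int    Lp[] = {0, 3, 5, 6};
   int    Li[] = {0, 1, 2, 1, 2, 2};
   double Lx[] = {2.0, 1.0, 4.0, 3.0, 5.0, 6.0};
   double x[]  = {2.0, 7.0, 32.0};            /* b = L * [1 2 3]^T */

   lsolve_csc(3, Lp, Li, Lx, x);
   printf("x = %g %g %g\n", x[0], x[1], x[2]);   /* prints: x = 1 2 3 */
   return 0;
}

The level-set variants above (lsolvePar, lsolveParH2) perform the same per-column update, but columns within a level are processed concurrently and the scatter into x uses an atomic subtraction.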
idaFoodWeb_bnd_omp.c
/* * ----------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds and Ting Yan @ SMU * Based on idaFoodWeb_bnd.c and parallelized with OpenMP * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2021, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * Example program for IDA: Food web problem. * * This example program (OpenMP version) uses the SUNBAND linear * solver, and IDACalcIC for initial condition calculation. * * The mathematical problem solved in this example is a DAE system * that arises from a system of partial differential equations after * spatial discretization. The PDE system is a food web population * model, with predator-prey interaction and diffusion on the unit * square in two dimensions. The dependent variable vector is: * * 1 2 ns * c = (c , c , ..., c ) , ns = 2 * np * * and the PDE's are as follows: * * i i i * dc /dt = d(i)*(c + c ) + R (x,y,c) (i = 1,...,np) * xx yy i * * i i * 0 = d(i)*(c + c ) + R (x,y,c) (i = np+1,...,ns) * xx yy i * * where the reaction terms R are: * * i ns j * R (x,y,c) = c * (b(i) + sum a(i,j)*c ) * i j=1 * * The number of species is ns = 2 * np, with the first np being * prey and the last np being predators. The coefficients a(i,j), * b(i), d(i) are: * * a(i,i) = -AA (all i) * a(i,j) = -GG (i <= np , j > np) * a(i,j) = EE (i > np, j <= np) * all other a(i,j) = 0 * b(i) = BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i <= np) * b(i) =-BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i > np) * d(i) = DPREY (i <= np) * d(i) = DPRED (i > np) * * The various scalar parameters required are set using '#define' * statements or directly in routine InitUserData. In this program, * np = 1, ns = 2. The boundary conditions are homogeneous Neumann: * normal derivative = 0. * * A polynomial in x and y is used to set the initial values of the * first np variables (the prey variables) at each x,y location, * while initial values for the remaining (predator) variables are * set to a flat value, which is corrected by IDACalcIC. * * The PDEs are discretized by central differencing on a MX by MY * mesh. * * The DAE system is solved by IDA using the SUNBAND linear solver. * Output is printed at t = 0, .001, .01, .1, .4, .7, 1. * * Optionally, we can set the number of threads from environment * variable or command line. To check the current value for number * of threads from environment: * % echo $OMP_NUM_THREADS * * Execution: * * To use the default value for the number of threads from * the OMP_NUM_THREADS environment value: * % ./idaFoodWeb_bnd_omp * To specify the number of threads at the command line, use * % ./idaFoodWeb_bnd_omp num_threads * where num_threads is the desired number of threads. * * ----------------------------------------------------------------- * References: * [1] Peter N. Brown and Alan C. Hindmarsh, * Reduced Storage Matrix Methods in Stiff ODE systems, Journal * of Applied Mathematics and Computation, Vol. 31 (May 1989), * pp. 40-91. * * [2] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold, * Using Krylov Methods in the Solution of Large-Scale * Differential-Algebraic Systems, SIAM J. Sci. Comput., 15 * (1994), pp. 1467-1488. * * [3] Peter N. Brown, Alan C. Hindmarsh, and Linda R. 
Petzold, * Consistent Initial Condition Calculation for Differential- * Algebraic Systems, SIAM J. Sci. Comput., 19 (1998), * pp. 1495-1512. * ----------------------------------------------------------------- */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <ida/ida.h> #include <sunmatrix/sunmatrix_band.h> #include <sunlinsol/sunlinsol_band.h> #include <nvector/nvector_openmp.h> #include <sundials/sundials_direct.h> #include <sundials/sundials_types.h> #ifdef _OPENMP #include <omp.h> #endif /* Problem Constants. */ #define NPREY 1 /* No. of prey (= no. of predators). */ #define NUM_SPECIES 2*NPREY #define PI RCONST(3.1415926535898) #define FOURPI (RCONST(4.0)*PI) #define MX 20 /* MX = number of x mesh points */ #define MY 20 /* MY = number of y mesh points */ #define NSMX (NUM_SPECIES * MX) #define NEQ (NUM_SPECIES*MX*MY) #define AA RCONST(1.0) /* Coefficient in above eqns. for a */ #define EE RCONST(10000.) /* Coefficient in above eqns. for a */ #define GG RCONST(0.5e-6) /* Coefficient in above eqns. for a */ #define BB RCONST(1.0) /* Coefficient in above eqns. for b */ #define DPREY RCONST(1.0) /* Coefficient in above eqns. for d */ #define DPRED RCONST(0.05) /* Coefficient in above eqns. for d */ #define ALPHA RCONST(50.) /* Coefficient alpha in above eqns. */ #define BETA RCONST(1000.) /* Coefficient beta in above eqns. */ #define AX RCONST(1.0) /* Total range of x variable */ #define AY RCONST(1.0) /* Total range of y variable */ #define RTOL RCONST(1.e-5) /* Relative tolerance */ #define ATOL RCONST(1.e-5) /* Absolute tolerance */ #define NOUT 6 /* Number of output times */ #define TMULT RCONST(10.0) /* Multiplier for tout values */ #define TADD RCONST(0.3) /* Increment for tout values */ #define ZERO RCONST(0.) #define ONE RCONST(1.0) /* * User-defined vector and accessor macro: IJ_Vptr. * IJ_Vptr is defined in order to express the underlying 3-D structure of * the dependent variable vector from its underlying 1-D storage (an N_Vector). * IJ_Vptr(vv,i,j) returns a pointer to the location in vv corresponding to * species index is = 0, x-index ix = i, and y-index jy = j. */ #define IJ_Vptr(vv,i,j) (&NV_Ith_OMP(vv, (i)*NUM_SPECIES + (j)*NSMX)) /* Type: UserData. Contains problem constants, etc. */ typedef struct { sunindextype Neq, ns, np, mx, my; realtype dx, dy, **acoef; realtype cox[NUM_SPECIES], coy[NUM_SPECIES], bcoef[NUM_SPECIES]; N_Vector rates; int nthreads; } *UserData; /* Prototypes for functions called by the IDA Solver. */ static int resweb(realtype time, N_Vector cc, N_Vector cp, N_Vector resval, void *user_data); /* Prototypes for private Helper Functions. 
*/ static void InitUserData(UserData webdata); static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id, UserData webdata); static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol, realtype atol); static void PrintOutput(void *ida_mem, N_Vector c, realtype t); static void PrintFinalStats(void *ida_mem); static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate, UserData webdata); static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy, UserData webdata); static realtype dotprod(sunindextype size, realtype *x1, realtype *x2); static int check_retval(void *returnvalue, char *funcname, int opt); /* *-------------------------------------------------------------------- * MAIN PROGRAM *-------------------------------------------------------------------- */ int main(int argc, char *argv[]) { void *ida_mem; SUNMatrix A; SUNLinearSolver LS; UserData webdata; N_Vector cc, cp, id; int iout, retval; sunindextype mu, ml; realtype rtol, atol, t0, tout, tret; int num_threads; ida_mem = NULL; A = NULL; LS = NULL; webdata = NULL; cc = cp = id = NULL; /* Set the number of threads to use */ num_threads = 1; /* default value */ #ifdef _OPENMP num_threads = omp_get_max_threads(); /* overwrite with OMP_NUM_THREADS enviroment variable */ #endif if (argc > 1) /* overwrite with command line value, if supplied */ num_threads = (int) strtol(argv[1], NULL, 0); /* Allocate and initialize user data block webdata. */ webdata = (UserData) malloc(sizeof *webdata); webdata->rates = N_VNew_OpenMP(NEQ, num_threads); webdata->acoef = newDenseMat(NUM_SPECIES, NUM_SPECIES); webdata->nthreads = num_threads; InitUserData(webdata); /* Allocate N-vectors and initialize cc, cp, and id. */ cc = N_VNew_OpenMP(NEQ, num_threads); if(check_retval((void *)cc, "N_VNew_OpenMP", 0)) return(1); cp = N_VNew_OpenMP(NEQ, num_threads); if(check_retval((void *)cp, "N_VNew_OpenMP", 0)) return(1); id = N_VNew_OpenMP(NEQ, num_threads); if(check_retval((void *)id, "N_VNew_OpenMP", 0)) return(1); SetInitialProfiles(cc, cp, id, webdata); /* Set remaining inputs to IDAMalloc. */ t0 = ZERO; rtol = RTOL; atol = ATOL; /* Call IDACreate and IDAMalloc to initialize IDA. */ ida_mem = IDACreate(); if(check_retval((void *) ida_mem, "IDACreate", 0)) return(1); retval = IDASetUserData(ida_mem, webdata); if(check_retval(&retval, "IDASetUserData", 1)) return(1); retval = IDASetId(ida_mem, id); if(check_retval(&retval, "IDASetId", 1)) return(1); retval = IDAInit(ida_mem, resweb, t0, cc, cp); if(check_retval(&retval, "IDAInit", 1)) return(1); retval = IDASStolerances(ida_mem, rtol, atol); if(check_retval(&retval, "IDASStolerances", 1)) return(1); /* Setup band matrix and linear solver, and attach to IDA. */ mu = ml = NSMX; A = SUNBandMatrix(NEQ, mu, ml); if(check_retval((void *)A, "SUNBandMatrix", 0)) return(1); LS = SUNLinSol_Band(cc, A); if(check_retval((void *)LS, "SUNLinSol_Band", 0)) return(1); retval = IDASetLinearSolver(ida_mem, LS, A); if(check_retval(&retval, "IDASetLinearSolver", 1)) return(1); /* Call IDACalcIC (with default options) to correct the initial values. */ tout = RCONST(0.001); retval = IDACalcIC(ida_mem, IDA_YA_YDP_INIT, tout); if(check_retval(&retval, "IDACalcIC", 1)) return(1); /* Print heading, basic parameters, and initial values. */ PrintHeader(mu, ml, rtol, atol); PrintOutput(ida_mem, cc, ZERO); /* Loop over iout, call IDASolve (normal mode), print selected output. 
*/ for (iout = 1; iout <= NOUT; iout++) { retval = IDASolve(ida_mem, tout, &tret, cc, cp, IDA_NORMAL); if(check_retval(&retval, "IDASolve", 1)) return(retval); PrintOutput(ida_mem, cc, tret); if (iout < 3) tout *= TMULT; else tout += TADD; } /* Print final statistics and free memory. */ PrintFinalStats(ida_mem); printf("num_threads = %i\n\n", num_threads); /* Free memory */ IDAFree(&ida_mem); SUNLinSolFree(LS); SUNMatDestroy(A); N_VDestroy_OpenMP(cc); N_VDestroy_OpenMP(cp); N_VDestroy_OpenMP(id); destroyMat(webdata->acoef); N_VDestroy_OpenMP(webdata->rates); free(webdata); return(0); } /* Define lines for readability in later routines */ #define acoef (webdata->acoef) #define bcoef (webdata->bcoef) #define cox (webdata->cox) #define coy (webdata->coy) /* *-------------------------------------------------------------------- * FUNCTIONS CALLED BY IDA *-------------------------------------------------------------------- */ /* * resweb: System residual function for predator-prey system. * This routine calls Fweb to get all the right-hand sides of the * equations, then loads the residual vector accordingly, * using cp in the case of prey species. */ static int resweb(realtype tt, N_Vector cc, N_Vector cp, N_Vector res, void *user_data) { sunindextype jx, jy, is, yloc, loc, np; realtype *resv, *cpv; UserData webdata; jx = jy = is = 0; webdata = (UserData)user_data; cpv = NV_DATA_OMP(cp); resv = NV_DATA_OMP(res); np = webdata->np; /* Call Fweb to set res to vector of right-hand sides. */ Fweb(tt, cc, res, webdata); /* Loop over all grid points, setting residual values appropriately for differential or algebraic components. */ #pragma omp parallel for default(shared) private(jy, yloc, jx, loc, is) schedule(static) num_threads(webdata->nthreads) for (jy = 0; jy < MY; jy++) { yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { loc = yloc + NUM_SPECIES * jx; for (is = 0; is < NUM_SPECIES; is++) { if (is < np) resv[loc+is] = cpv[loc+is] - resv[loc+is]; else resv[loc+is] = -resv[loc+is]; } } } return(0); } /* *-------------------------------------------------------------------- * PRIVATE FUNCTIONS *-------------------------------------------------------------------- */ /* * InitUserData: Load problem constants in webdata (of type UserData). */ static void InitUserData(UserData webdata) { sunindextype i, j, np; realtype *a1,*a2, *a3, *a4, dx2, dy2; webdata->mx = MX; webdata->my = MY; webdata->ns = NUM_SPECIES; webdata->np = NPREY; webdata->dx = AX/(MX-1); webdata->dy = AY/(MY-1); webdata->Neq= NEQ; /* Set up the coefficients a and b, and others found in the equations. */ np = webdata->np; dx2 = (webdata->dx)*(webdata->dx); dy2 = (webdata->dy)*(webdata->dy); for (i = 0; i < np; i++) { a1 = &(acoef[i][np]); a2 = &(acoef[i+np][0]); a3 = &(acoef[i][0]); a4 = &(acoef[i+np][np]); /* Fill in the portion of acoef in the four quadrants, row by row. */ for (j = 0; j < np; j++) { *a1++ = -GG; *a2++ = EE; *a3++ = ZERO; *a4++ = ZERO; } /* Reset the diagonal elements of acoef to -AA. */ acoef[i][i] = -AA; acoef[i+np][i+np] = -AA; /* Set coefficients for b and diffusion terms. */ bcoef[i] = BB; bcoef[i+np] = -BB; cox[i] = DPREY/dx2; cox[i+np] = DPRED/dx2; coy[i] = DPREY/dy2; coy[i+np] = DPRED/dy2; } } /* * SetInitialProfiles: Set initial conditions in cc, cp, and id. * A polynomial profile is used for the prey cc values, and a constant * (1.0e5) is loaded as the initial guess for the predator cc values. * The id values are set to 1 for the prey and 0 for the predators. 
* The prey cp values are set according to the given system, and * the predator cp values are set to zero. */ static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id, UserData webdata) { sunindextype loc, yloc, is, jx, jy, np; realtype xx, yy, xyfactor; realtype *ccv, *cpv, *idv; ccv = NV_DATA_OMP(cc); cpv = NV_DATA_OMP(cp); idv = NV_DATA_OMP(id); np = webdata->np; /* Loop over grid, load cc values and id values. */ for (jy = 0; jy < MY; jy++) { yy = jy * webdata->dy; yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { xx = jx * webdata->dx; xyfactor = RCONST(16.0)*xx*(ONE-xx)*yy*(ONE-yy); xyfactor *= xyfactor; loc = yloc + NUM_SPECIES*jx; for (is = 0; is < NUM_SPECIES; is++) { if (is < np) { ccv[loc+is] = RCONST(10.0) + (realtype)(is+1) * xyfactor; idv[loc+is] = ONE; } else { ccv[loc+is] = RCONST(1.0e5); idv[loc+is] = ZERO; } } } } /* Set c' for the prey by calling the function Fweb. */ Fweb(ZERO, cc, cp, webdata); /* Set c' for predators to 0. */ for (jy = 0; jy < MY; jy++) { yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { loc = yloc + NUM_SPECIES * jx; for (is = np; is < NUM_SPECIES; is++) { cpv[loc+is] = ZERO; } } } } /* * Print first lines of output (problem description) */ static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol, realtype atol) { printf("\nidaFoodWeb_bnd_omp: Predator-prey DAE OpenMP example problem for IDA \n\n"); printf("Number of species ns: %d", NUM_SPECIES); printf(" Mesh dimensions: %d x %d", MX, MY); printf(" System size: %d\n", NEQ); #if defined(SUNDIALS_EXTENDED_PRECISION) printf("Tolerance parameters: rtol = %Lg atol = %Lg\n", rtol, atol); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol); #else printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol); #endif printf("Linear solver: SUNBAND, Band parameters mu = %ld, ml = %ld\n", (long int) mu, (long int) ml); printf("CalcIC called to correct initial predator concentrations.\n\n"); printf("-----------------------------------------------------------\n"); printf(" t bottom-left top-right"); printf(" | nst k h\n"); printf("-----------------------------------------------------------\n\n"); } /* * PrintOutput: Print output values at output time t = tt. * Selected run statistics are printed. Then values of the concentrations * are printed for the bottom left and top right grid points only. */ static void PrintOutput(void *ida_mem, N_Vector c, realtype t) { int i, kused, retval; long int nst; realtype *c_bl, *c_tr, hused; retval = IDAGetLastOrder(ida_mem, &kused); check_retval(&retval, "IDAGetLastOrder", 1); retval = IDAGetNumSteps(ida_mem, &nst); check_retval(&retval, "IDAGetNumSteps", 1); retval = IDAGetLastStep(ida_mem, &hused); check_retval(&retval, "IDAGetLastStep", 1); c_bl = IJ_Vptr(c,0,0); c_tr = IJ_Vptr(c,MX-1,MY-1); #if defined(SUNDIALS_EXTENDED_PRECISION) printf("%8.2Le %12.4Le %12.4Le | %3ld %1d %12.4Le\n", t, c_bl[0], c_tr[0], nst, kused, hused); for (i=1;i<NUM_SPECIES;i++) printf(" %12.4Le %12.4Le |\n",c_bl[i],c_tr[i]); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n", t, c_bl[0], c_tr[0], nst, kused, hused); for (i=1;i<NUM_SPECIES;i++) printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]); #else printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n", t, c_bl[0], c_tr[0], nst, kused, hused); for (i=1;i<NUM_SPECIES;i++) printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]); #endif printf("\n"); } /* * PrintFinalStats: Print final run data contained in iopt. 
*/ static void PrintFinalStats(void *ida_mem) { long int nst, nre, nreLS, nni, nje, netf, ncfn; int retval; retval = IDAGetNumSteps(ida_mem, &nst); check_retval(&retval, "IDAGetNumSteps", 1); retval = IDAGetNumNonlinSolvIters(ida_mem, &nni); check_retval(&retval, "IDAGetNumNonlinSolvIters", 1); retval = IDAGetNumResEvals(ida_mem, &nre); check_retval(&retval, "IDAGetNumResEvals", 1); retval = IDAGetNumErrTestFails(ida_mem, &netf); check_retval(&retval, "IDAGetNumErrTestFails", 1); retval = IDAGetNumNonlinSolvConvFails(ida_mem, &ncfn); check_retval(&retval, "IDAGetNumNonlinSolvConvFails", 1); retval = IDAGetNumJacEvals(ida_mem, &nje); check_retval(&retval, "IDAGetNumJacEvals", 1); retval = IDAGetNumLinResEvals(ida_mem, &nreLS); check_retval(&retval, "IDAGetNumLinResEvals", 1); printf("-----------------------------------------------------------\n"); printf("Final run statistics: \n\n"); printf("Number of steps = %ld\n", nst); printf("Number of residual evaluations = %ld\n", nre+nreLS); printf("Number of Jacobian evaluations = %ld\n", nje); printf("Number of nonlinear iterations = %ld\n", nni); printf("Number of error test failures = %ld\n", netf); printf("Number of nonlinear conv. failures = %ld\n", ncfn); } /* * Fweb: Rate function for the food-web problem. * This routine computes the right-hand sides of the system equations, * consisting of the diffusion term and interaction term. * The interaction term is computed by the function WebRates. */ static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate, UserData webdata) { sunindextype jx, jy, is, idyu, idyl, idxu, idxl; realtype xx, yy, *cxy, *ratesxy, *cratexy, dcyli, dcyui, dcxli, dcxui; /* Loop over grid points, evaluate interaction vector (length ns), form diffusion difference terms, and load crate. */ jx = jy = is = 0; for (jy = 0; jy < MY; jy++) { yy = (webdata->dy) * jy ; idyu = (jy!=MY-1) ? NSMX : -NSMX; idyl = (jy!= 0 ) ? NSMX : -NSMX; for (jx = 0; jx < MX; jx++) { xx = (webdata->dx) * jx; idxu = (jx!= MX-1) ? NUM_SPECIES : -NUM_SPECIES; idxl = (jx!= 0 ) ? NUM_SPECIES : -NUM_SPECIES; cxy = IJ_Vptr(cc,jx,jy); ratesxy = IJ_Vptr(webdata->rates,jx,jy); cratexy = IJ_Vptr(crate,jx,jy); /* Get interaction vector at this grid point. */ WebRates(xx, yy, cxy, ratesxy, webdata); /* Loop over species, do differencing, load crate segment. */ #pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) schedule(static) num_threads(webdata->nthreads) for (is = 0; is < NUM_SPECIES; is++) { /* Differencing in y. */ dcyli = *(cxy+is) - *(cxy - idyl + is) ; dcyui = *(cxy + idyu + is) - *(cxy+is); /* Differencing in x. */ dcxli = *(cxy+is) - *(cxy - idxl + is); dcxui = *(cxy + idxu +is) - *(cxy+is); /* Compute the crate values at (xx,yy). */ cratexy[is] = coy[is] * (dcyui - dcyli) + cox[is] * (dcxui - dcxli) + ratesxy[is]; } /* End is loop */ } /* End of jx loop */ } /* End of jy loop */ } /* * WebRates: Evaluate reaction rates at a given spatial point. * At a given (x,y), evaluate the array of ns reaction terms R. */ static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy, UserData webdata) { int is; realtype fac; for (is = 0; is < NUM_SPECIES; is++) ratesxy[is] = dotprod(NUM_SPECIES, cxy, acoef[is]); fac = ONE + ALPHA*xx*yy + BETA*sin(FOURPI*xx)*sin(FOURPI*yy); for (is = 0; is < NUM_SPECIES; is++) ratesxy[is] = cxy[is]*( bcoef[is]*fac + ratesxy[is] ); } /* * dotprod: dot product routine for realtype arrays, for use by WebRates. 
 */

static realtype dotprod(sunindextype size, realtype *x1, realtype *x2)
{
  sunindextype i;
  realtype *xx1, *xx2, temp = ZERO;

  xx1 = x1; xx2 = x2;
  for (i = 0; i < size; i++) temp += (*xx1++) * (*xx2++);
  return(temp);
}

/*
 * Check function return value...
 *   opt == 0 means SUNDIALS function allocates memory so check if
 *            returned NULL pointer
 *   opt == 1 means SUNDIALS function returns an integer value so check if
 *            retval < 0
 *   opt == 2 means function allocates memory so check if returned
 *            NULL pointer
 */

static int check_retval(void *returnvalue, char *funcname, int opt)
{
  int *retval;

  if (opt == 0 && returnvalue == NULL) {
    /* Check if SUNDIALS function returned NULL pointer - no memory allocated */
    fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n", funcname);
    return(1);
  } else if (opt == 1) {
    /* Check if retval < 0 */
    retval = (int *) returnvalue;
    if (*retval < 0) {
      fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n", funcname, *retval);
      return(1);
    }
  } else if (opt == 2 && returnvalue == NULL) {
    /* Check if function returned NULL pointer - no memory allocated */
    fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n", funcname);
    return(1);
  }

  return(0);
}
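/*
 * Illustrative sketch (not part of the SUNDIALS example above; all constants here
 * are hypothetical placeholders): resweb() and SetInitialProfiles() rely on the
 * flattened grid layout loc = NSMX*jy + NUM_SPECIES*jx + is, with prey components
 * (is < np) treated as differential and predator components as algebraic.  This
 * standalone program reproduces only that indexing so the layout can be inspected
 * in isolation.
 */
#include <stdio.h>

#define DEMO_MX 2   /* hypothetical grid width         */
#define DEMO_MY 2   /* hypothetical grid height        */
#define DEMO_NS 4   /* hypothetical number of species  */
#define DEMO_NP 2   /* hypothetical number of prey     */

int main(void)
{
  const int nsmx = DEMO_NS * DEMO_MX;            /* stride of one grid row */

  for (int jy = 0; jy < DEMO_MY; jy++) {
    int yloc = nsmx * jy;
    for (int jx = 0; jx < DEMO_MX; jx++) {
      int loc = yloc + DEMO_NS * jx;             /* first species at (jx,jy) */
      for (int is = 0; is < DEMO_NS; is++) {
        printf("(jx=%d, jy=%d) species %d -> index %2d : %s\n", jx, jy, is,
               loc + is, is < DEMO_NP ? "differential (prey)"
                                      : "algebraic (predator)");
      }
    }
  }
  return 0;
}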
target_teams_distribute_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s // expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute simd'}} #pragma omp target teams distribute simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute simd'}} #pragma omp target teams distribute simd foo void test_no_clause() { int i; #pragma omp target teams distribute simd for (i = 0; i < 16; ++i) ; // expected-error@+2 {{statement after '#pragma omp target teams distribute simd' must be a for loop}} #pragma omp target teams distribute simd ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp target teams distribute simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; // expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute simd' are ignored}} #pragma omp target teams distribute simd foo bar for (i = 0; i < 16; ++i) ; } void test_non_identifiers() { int i, x; // expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute simd' are ignored}} #pragma omp target teams distribute simd; for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute simd' are ignored}} #pragma omp target teams distribute simd private(x); for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute simd' are ignored}} #pragma omp target teams distribute simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(); void test_collapse() { int i; // expected-error@+1 {{expected '('}} #pragma omp target teams distribute simd collapse for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp target teams distribute simd collapse( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp target teams distribute simd collapse() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp target teams distribute simd collapse(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp target teams distribute simd collapse(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp target teams distribute simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp target teams distribute simd collapse 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp target teams distribute simd collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp target teams distribute simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after 
'#pragma omp target teams distribute simd', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp target teams distribute simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}} // expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp target teams distribute simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp target teams distribute simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp target teams distribute simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}} #pragma omp target teams distribute simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp target teams distribute simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}} // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp target teams distribute simd collapse(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp target teams distribute simd collapse(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp target teams distribute simd collapse(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp target teams distribute simd collapse(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp target teams distribute simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; // expected-error@+4 {{OpenMP constructs may not be nested inside a simd region}} #pragma omp target teams distribute simd collapse(2) firstprivate(i) for (i = 0; i < 16; ++i) for (int j = 0; j < 16; ++j) #pragma omp parallel for reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; } void test_private() { int i; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp target teams distribute simd private( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp target teams distribute simd private(, for (i = 0; i < 16; ++i) ; // expected-error@+1 2 {{expected expression}} #pragma omp target teams distribute simd private(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} 
#pragma omp target teams distribute simd private() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp target teams distribute simd private(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp target teams distribute simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target teams distribute simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp target teams distribute simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target teams distribute simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_lastprivate() { int i; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp target teams distribute simd lastprivate( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp target teams distribute simd lastprivate(, for (i = 0; i < 16; ++i) ; // expected-error@+1 2 {{expected expression}} #pragma omp target teams distribute simd lastprivate(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp target teams distribute simd lastprivate() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp target teams distribute simd lastprivate(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp target teams distribute simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target teams distribute simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp target teams distribute simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target teams distribute simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_firstprivate() { int i; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp target teams distribute simd firstprivate( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp target teams distribute simd firstprivate(, for (i = 0; i < 16; ++i) ; // expected-error@+1 2 {{expected expression}} #pragma omp target teams distribute simd firstprivate(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp target teams distribute simd firstprivate() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp target teams distribute simd firstprivate(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp target teams distribute simd firstprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target teams distribute simd lastprivate(x) firstprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp target teams distribute simd lastprivate(x, y) firstprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target teams distribute simd lastprivate(x, y, z) firstprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_loop_messages() { float a[100], b[100], c[100]; // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp target teams distribute simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp target teams distribute simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = 
a[(int)fi] + b[(int)fi]; } }
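/*
 * For contrast with the diagnostics exercised above, a conforming use of the
 * combined construct (illustrative only, not part of the test's RUN line):
 * collapse(2) requires two perfectly nested canonical for-loops, and the clause
 * argument must be a strictly positive integer constant expression.
 */
void test_well_formed(void) {
  int x = 0, y = 0;
#pragma omp target teams distribute simd collapse(2) firstprivate(x) lastprivate(y)
  for (int i = 0; i < 16; ++i)
    for (int j = 0; j < 16; ++j)
      y = x + i + j;
}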
bitonic_sort_hybrid.c
#include <stdio.h> #include <time.h> #include <math.h> #include <stdlib.h> #include <mpi.h> #include <stdbool.h> #include <omp.h> #define MASTER 0 // Globals double timer_start; double timer_end; int process_rank; int num_processes; int *arr; int n; void init() { for (int i = 0; i < n; i++) arr[i] = rand() % n; } int ComparisonFunc(const void * a, const void * b) { return ( * (int *)a - * (int *)b ); } /* Sends the biggest of the list and receive the smallest of the list */ void CompareLow(int j) { int i, min; // Exchange with a neighbor whose (d-bit binary) processor number differs only at the jth bit. int send_counter = 0; int * buffer_send = malloc((n + 1) * sizeof(int)); MPI_Send( &arr[n - 1], // entire arr 1, // one data item MPI_INT, // INT process_rank ^ (1 << j), // paired process calc by XOR with 1 shifted left j positions 0, // tag 0 MPI_COMM_WORLD // default comm. ); // Receive new min of sorted numbers int * buffer_receive = malloc((n + 1) * sizeof(int)); MPI_Recv( &min, // buffer the message 1, // one data item MPI_INT, // INT process_rank ^ (1 << j), // paired process calc by XOR with 1 shifted left j positions 0, // tag 0 MPI_COMM_WORLD, // default comm. MPI_STATUS_IGNORE // ignore info about message received ); // Buffers all values which are greater than min send from H Process. #pragma omp parallel for shared(arr, min, send_counter) private(i) for (i = 0; i < n; i++) { if (arr[i] > min) { buffer_send[send_counter + 1] = arr[i]; send_counter++; } else { #pragma omp cancel for } } buffer_send[0] = send_counter; // send partition to paired H process MPI_Send( buffer_send, // Send values that are greater than min send_counter, // # of items sent MPI_INT, // INT process_rank ^ (1 << j), // paired process calc by XOR with 1 shifted left j positions 0, // tag 0 MPI_COMM_WORLD // default comm. ); // receive info from paired H process MPI_Recv( buffer_receive, // buffer the message n, // whole arr MPI_INT, // INT process_rank ^ (1 << j), // paired process calc by XOR with 1 shifted left j positions 0, // tag 0 MPI_COMM_WORLD, // default comm. MPI_STATUS_IGNORE // ignore info about message received ); // Take received buffer of values from H Process which are smaller than current max #pragma omp parallel for shared(arr) private(i) for (i = 1; i < buffer_receive[0] + 1; i++) { if (arr[n - 1] < buffer_receive[i]) { // Store value from message arr[n - 1] = buffer_receive[i]; } else { #pragma omp cancel for } } qsort(arr, n, sizeof(int), ComparisonFunc); // Reset the state of the heap from Malloc free(buffer_send); free(buffer_receive); return; } void CompareHigh(int j) { int i, max; // Receive max from L Process's entire arr int recv_counter; int * buffer_receive = malloc((n + 1) * sizeof(int)); MPI_Recv( &max, // buffer max value 1, // one item MPI_INT, // INT process_rank ^ (1 << j), // paired process calc by XOR with 1 shifted left j positions 0, // tag 0 MPI_COMM_WORLD, // default comm. MPI_STATUS_IGNORE // ignore info about message received ); // Send min to L Process of current process's arr int send_counter = 0; int * buffer_send = malloc((n + 1) * sizeof(int)); MPI_Send( &arr[0], // send min 1, // one item MPI_INT, // INT process_rank ^ (1 << j), // paired process calc by XOR with 1 shifted left j positions 0, // tag 0 MPI_COMM_WORLD // default comm. 
); // Buffer a list of values which are smaller than max value #pragma omp parallel for shared(arr, max, send_counter) private(i) for (i = 0; i < n; i++) { if (arr[i] < max) { buffer_send[send_counter + 1] = arr[i]; send_counter++; } else { #pragma omp cancel for } } // Receive blocks greater than min from paired slave MPI_Recv( buffer_receive, // buffer message n, // whole arr MPI_INT, // INT process_rank ^ (1 << j), // paired process calc by XOR with 1 shifted left j positions 0, // tag 0 MPI_COMM_WORLD, // default comm. MPI_STATUS_IGNORE // ignore info about message receiveds ); recv_counter = buffer_receive[0]; // send partition to paired slave buffer_send[0] = send_counter; MPI_Send( buffer_send, // all items smaller than max value send_counter, // # of values smaller than max MPI_INT, // INT process_rank ^ (1 << j), // paired process calc by XOR with 1 shifted left j positions 0, // tag 0 MPI_COMM_WORLD // default comm. ); // Take received buffer of values from L Process which are greater than current min #pragma omp parallel for shared(arr) private(i) for (i = 1; i < recv_counter + 1; i++) { if (buffer_receive[i] > arr[0]) { // Store value from message arr[0] = buffer_receive[i]; } else { #pragma omp cancel for } } qsort(arr, n, sizeof(int), ComparisonFunc); // Reset the state of the heap from Malloc free(buffer_send); free(buffer_receive); return; } void test() { bool pass = true; for (int i = 1; i < n; i++) { pass &= (arr[i - 1] <= arr[i]); } printf("TEST %s\n", (pass) ? "PASSED!" : "FAILED!"); } int main(int argc, char * argv[]) { int i, j; // Initialization, get # of processes & this PID/rank MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &num_processes); MPI_Comm_rank(MPI_COMM_WORLD, &process_rank); // Initialize arr for Storing Random Numbers n = (1 << atoi(argv[1])); arr = (int *) malloc(n * sizeof(int)); init(); // Blocks until all processes have finished generating MPI_Barrier(MPI_COMM_WORLD); // Cube Dimension int dimensions = (int)(log2(num_processes)); // Start Timer before starting first sort operation (first iteration) if (process_rank == MASTER) { printf("Number of Processes spawned: %d\n", num_processes); timer_start = MPI_Wtime(); } qsort(arr, n, sizeof(int), ComparisonFunc); // Bitonic Sort follows for (i = 0; i < dimensions; i++) { for (j = i; j >= 0; j--) { // (window_id is even AND jth bit of process is 0) // OR (window_id is odd AND jth bit of process is 1) if (((process_rank >> (i + 1)) % 2 == 0 && (process_rank >> j) % 2 == 0) || ((process_rank >> (i + 1)) % 2 != 0 && (process_rank >> j) % 2 != 0)) { CompareLow(j); } else { CompareHigh(j); } } } // Blocks until all processes have finished sorting MPI_Barrier(MPI_COMM_WORLD); if (process_rank == MASTER) { timer_end = MPI_Wtime(); test(); printf("Time Elapsed (Sec): %f\n", timer_end - timer_start); } free(arr); // Done MPI_Finalize(); return 0; }
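/*
 * Alternative sketch (an assumption, not the author's code): the partition loops in
 * CompareLow()/CompareHigh() increment the shared send_counter inside an OpenMP
 * parallel-for, which is a data race, and the '#pragma omp cancel for' early exit
 * has no effect unless cancellation is enabled (OMP_CANCELLATION=true).  Because
 * arr is re-sorted with qsort() before every exchange, the same partition can be
 * found race-free with a binary search.
 */

/* Index of the first element of a[0..len-1] (sorted ascending) that is strictly
   greater than key; returns len if no such element exists. */
static int upper_bound_int(const int *a, int len, int key)
{
  int lo = 0, hi = len;
  while (lo < hi) {
    int mid = lo + (hi - lo) / 2;
    if (a[mid] > key) hi = mid; else lo = mid + 1;
  }
  return lo;
}

/* Possible use inside CompareLow(), under the assumptions above (memcpy is from
 * <string.h>):
 *   int first = upper_bound_int(arr, n, min);   // first value greater than min
 *   int count = n - first;
 *   buffer_send[0] = count;
 *   memcpy(&buffer_send[1], &arr[first], count * sizeof(int));
 */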
GraphCutParallel.h
/* * MIT License * * Copyright (c) 2018-2019 Benjamin Köhler * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #pragma once #ifndef BKTOOLS_GRAPHCUTPARALLEL_H #define BKTOOLS_GRAPHCUTPARALLEL_H #if defined(__GNUC__) && defined(_OPENMP) #include <parallel/algorithm> #else #include <algorithm> #endif #include <array> #include <chrono> #include <cstdint> #include <iostream> #include <limits> #include <numeric> #include <utility> #include <tuple> #include <type_traits> #include <vector> #include <bkTools/graphcut/GraphCutBase.h> #include <bkTools/graphcut/GraphCutParallelBlock.h> #include <bkTools/graphcut/Edge.h> #include <bkTools/graphcut/gc_definitions.h> #ifdef BK_EMIT_PROGRESS #include <bkTools/progress/Progress.h> #include <bkTools/progress/GlobalProgressManager.h> #include <bkTools/localization/GlobalLocalizationManager.h> #endif namespace bk { template<int TDims> class GraphCutParallel : public gc_details::GraphCutBase<TDims, GraphCutParallel<TDims>> { //==================================================================================================== //===== DEFINITIONS //==================================================================================================== using self_type = GraphCutParallel<TDims>; using base_type = gc_details::GraphCutBase<TDims, GraphCutParallel<TDims>>; using gc = typename base_type::gc; using edge_type = gc_details::Edge<TDims>; using block_type = gc_details::GraphCutParallelBlock<TDims>; public: using flag_type = typename base_type::flag_type; using id_type = typename base_type::id_type; friend base_type; //==================================================================================================== //===== CONSTRUCTORS & DESTRUCTOR //==================================================================================================== public: /// @{ -------------------------------------------------- CTOR constexpr GraphCutParallel() = default; constexpr GraphCutParallel(const self_type&) = default; constexpr GraphCutParallel(self_type&&) noexcept = default; /// @} /// @{ -------------------------------------------------- DTOR ~GraphCutParallel() = default; /// @} //==================================================================================================== //===== SETTER //==================================================================================================== /// @{ -------------------------------------------------- OPERATOR = [[maybe_unused]] self_type& operator=(const self_type&) = default; [[maybe_unused]] self_type& 
operator=(self_type&&) noexcept = default; /// @} private: /// @{ -------------------------------------------------- SET ACTIVE void _set_active(const id_type& node) { flag_type& fc = this->flag(node); if (!(fc & gc::FLAG_ORPHAN()) && !(fc & gc::FLAG_FREE_SET())) { fc |= gc::FLAG_ACTIVE(); } } /// @} //==================================================================================================== //===== FUNCTIONS //==================================================================================================== /// @{ -------------------------------------------------- HELPER: RESET_IMPL static constexpr void reset_impl() { block_type::reset_id_counter(); } /// @} /// @{ -------------------------------------------------- HELPER: INIT BLOCKS template<int I> void _init_blocks(id_type& p, const id_type& numBlocks, std::vector<block_type>& blocks, std::vector<edge_type>& edges, unsigned int& edgecnt, unsigned int& blockidcnt) { for (int x = 0; x < numBlocks[I]; ++x) { p[I] = x; if constexpr (I != TDims - 1) { _init_blocks<I + 1>(p, numBlocks, blocks, edges, edgecnt, blockidcnt); } else { id_type size_from; for (int dimId = 0; dimId < TDims; ++dimId) { size_from[dimId] = p[dimId] * gc::BLOCK_SIZE(dimId); } blocks.emplace_back(this->_size, size_from, /*timestamp_init=*/1, this->_residual, this->_distance_to_terminal, this->_timestamp, this->_flags); for (int dimId = 0; dimId < TDims; ++dimId) { if (p[dimId] < numBlocks[dimId] - 1) /// edge to successor { edge_type& e = edges[edgecnt++]; e.blockID0 = blockidcnt; e.blockID1 = blockidcnt; unsigned int blockID1offset = 1; for (int k = dimId + 1; k < TDims; ++k) { blockID1offset *= numBlocks[k]; } e.blockID1 += blockID1offset; for (int k = 0; k < TDims; ++k) { if (k == dimId) { e.size_to[k] = (p[k] + 1) * gc::BLOCK_SIZE(k); e.size_from[k] = e.size_to[k] - 1; } else { e.size_from[k] = p[k] * gc::BLOCK_SIZE(k); e.size_to[k] = std::min(this->_size[k], e.size_from[k] + gc::BLOCK_SIZE(k)); } } } } ++blockidcnt; } } // for x } /// @} /// @{ -------------------------------------------------- HELPER: COUNT POTENTIAL AUGMENTATIONS template<int I, int skipDim> void _count_potential_augmentations(const edge_type& e, id_type& p, unsigned int& score) const { if constexpr (I == skipDim && I != TDims - 1) { _count_potential_augmentations<I + 1, skipDim>(e, p, score); } else { for (int x = e.size_from[I]; x < e.size_to[I]; ++x) { p[I] = x; const id_type neigh = gc::NEIGHBOR_SUCCESSOR(p, skipDim); if (!this->are_in_same_set(p, neigh) && !this->is_in_free_set(p) && !this->is_in_free_set(neigh)) { ++score; } } } } template<int I = 0> [[nodiscard]] unsigned int count_potential_augmentations(const edge_type& e) const { if constexpr (I < TDims) { if (e.size_to[I] - e.size_from[I] == 1) { id_type p; p.fill(0); p[I] = e.size_from[I]; unsigned int score = 0; _count_potential_augmentations<0, I>(e, p, score); if (score != 0) { return score; } } else { return count_potential_augmentations<I + 1>(e); } } return 0U; } /// @} /// @{ -------------------------------------------------- HELPER: ACTIVATE NODES FOR POTENTIAL AUGMENTATION ON EDGE template<int I, int skipDim> void __activate_nodes_for_potential_augmentation_on_edge(const edge_type& e, id_type& p) { if constexpr (I != TDims - 1) { _activate_nodes_for_potential_augmentation_on_edge<I + 1, skipDim, false>(e, p); } else { const id_type neigh = gc::NEIGHBOR_SUCCESSOR(p, skipDim); if (!this->are_in_same_set(p, neigh)) { _set_active(p); _set_active(neigh); } } } template<int I, int skipDim, bool first = true> void 
_activate_nodes_for_potential_augmentation_on_edge(const edge_type& e, id_type& p) { if constexpr (I == skipDim && I != TDims - 1) { _activate_nodes_for_potential_augmentation_on_edge<I + 1, skipDim, first>(e, p); } else { if constexpr (first) { #pragma omp parallel for for (int x = e.size_from[I]; x < e.size_to[I]; ++x) { p[I] = x; __activate_nodes_for_potential_augmentation_on_edge<I, skipDim>(e, p); } } else { for (int x = e.size_from[I]; x < e.size_to[I]; ++x) { p[I] = x; __activate_nodes_for_potential_augmentation_on_edge<I, skipDim>(e, p); } } } } template<int I = 0> void activate_nodes_for_potential_augmentation_on_edge(const edge_type& e) { if constexpr (I < TDims) { if (e.size_to[I] - e.size_from[I] == 1) { id_type p; p[I] = e.size_from[I]; _activate_nodes_for_potential_augmentation_on_edge<0, I>(e, p); } else { activate_nodes_for_potential_augmentation_on_edge<I + 1>(e); } } } /// @} public: /// @{ -------------------------------------------------- RUN void run() { /* * validity check */ if (this->_up2date) { return; } const bool anySizeIsZero = 0 == std::accumulate(this->_size.begin(), this->_size.end(), 1, [](int x, int y) { return x * y; }); if (anySizeIsZero || this->_connected_to_source.empty() || this->_connected_to_sink.empty()) { std::cerr << "init graph cut first" << std::endl; return; } /* * determine num blocks per dimension * and num blocks total */ id_type numBlocks; int numBlocksTotal = 1; for (int dimId = 0; dimId < TDims; ++dimId) { numBlocks[dimId] = (this->_size[dimId] / gc::BLOCK_SIZE(dimId)) + (this->_size[dimId] % gc::BLOCK_SIZE(dimId) ? 1 : 0); numBlocksTotal *= numBlocks[dimId]; } /* * determine num edges total */ int numEdgesTotal = 0; for (int dimId = 0; dimId < TDims; ++dimId) { int tempNumEdges = numBlocks[dimId] - 1; for (int k = 0; k < dimId; ++k) { tempNumEdges *= numBlocks[k]; } for (int k = dimId + 1; k < TDims; ++k) { tempNumEdges *= numBlocks[k]; } numEdgesTotal += tempNumEdges; } #ifdef BK_EMIT_PROGRESS bk::Progress& prog = bk_progress.emplace_task(8 + numBlocksTotal + numEdgesTotal, ___("Performing graph cut")); #endif this->reset(); #ifdef BK_EMIT_PROGRESS prog.increment(1); #endif constexpr int timestamp_init = 1; const std::chrono::system_clock::time_point clock_start = std::chrono::system_clock::now(); // todo /* * divide image into blocks */ block_type::reset_id_counter(); std::vector<block_type> blocks; std::vector<edge_type> edges; #pragma omp parallel sections { #pragma omp section { for (unsigned int i = 0; i < this->_connected_to_source.size(); ++i) { const id_type& s = this->_connected_to_source[i]; this->set_source_as_parent(s); _set_active(s); this->distance_to_terminal(s) = 0; this->timestamp(s) = timestamp_init; } #pragma omp critical (gc_progress) { #ifdef BK_EMIT_PROGRESS prog.increment(1); #endif } for (unsigned int i = 0; i < this->_connected_to_sink.size(); ++i) { const id_type& s = this->_connected_to_sink[i]; this->set_sink_as_parent(s); _set_active(s); this->distance_to_terminal(s) = 0; this->timestamp(s) = timestamp_init; } #pragma omp critical (gc_progress) { #ifdef BK_EMIT_PROGRESS prog.increment(1); #endif } } // omp section #pragma omp section { blocks.reserve(2 * numBlocksTotal); edges.resize(numEdgesTotal); unsigned int edgecnt = 0; unsigned int blockidcnt = 0; for (int x = 0; x < numBlocks[0]; ++x) { id_type p; p.fill(0); p[0] = x; _init_blocks<1>(p, numBlocks, blocks, edges, edgecnt, blockidcnt); } #pragma omp critical (gc_progress) { #ifdef BK_EMIT_PROGRESS prog.increment(1); #endif } } // omp section }; // omp 
parallel sections //------------------------------------------------------------------------------------------------------ // Phase 1: Uniform Partitioning //------------------------------------------------------------------------------------------------------ /* * process blocks */ #ifdef BK_EMIT_PROGRESS int cnt = 0; #endif #pragma omp parallel for schedule(dynamic, 1) for (unsigned int blockid = 0; blockid < blocks.size(); ++blockid) { blocks[blockid].run(); #ifdef BK_EMIT_PROGRESS prog.increment(1); ++cnt; #endif } #ifdef BK_EMIT_PROGRESS prog.increment(numBlocksTotal-cnt); #endif //------------------------------------------------------------------------------------------------------ // Phase 2 preparation //------------------------------------------------------------------------------------------------------ // this can happen in rare cases where a block consists only of one rows/cols/slices edges.erase(std::remove_if(edges.begin(), edges.end(), [&](const edge_type& e) { for (int dimId = 0; dimId < TDims; ++dimId) { if (e.size_from[dimId] >= this->_size[dimId] - 1) { return true; } } return false; }), edges.end()); /* * count potential augmentations on edges */ #pragma omp parallel for for (unsigned int i = 0; i < edges.size(); ++i) { edge_type& e = edges[i]; e.score = count_potential_augmentations<0>(e); } #ifdef BK_EMIT_PROGRESS prog.increment(1); #endif /* * sort edge scores (descending) */ #if defined(__GNUC__) && defined(_OPENMP) __gnu_parallel #else std #endif ::sort(edges.begin(), edges.end(), [](const edge_type& a, const edge_type& b) -> bool { return b.score < a.score; }); #ifdef BK_EMIT_PROGRESS prog.increment(1); #endif //------------------------------------------------------------------------------------------------------ // Phase 2: Adaptive Merging //------------------------------------------------------------------------------------------------------ #pragma omp parallel for num_threads(gc::NUM_THREADS()) for (unsigned int t = 0; t < gc::NUM_THREADS(); ++t) { block_type* block0 = nullptr; block_type* block1 = nullptr; block_type* newblock = nullptr; unsigned int block0id = 0; unsigned int block1id = 0; std::list<edge_type> current_edges; unsigned int newBlockid = 0; while (true) { current_edges.clear(); #pragma omp critical (edge_lock) { for (auto it = edges.begin(); it != edges.end(); ++it) { block0 = &blocks[it->blockID0]; block1 = &blocks[it->blockID1]; if (!block0->_locked && !block1->_locked) { block0id = block0->_id; block1id = block1->_id; #pragma omp parallel sections { #pragma omp section { /* * searching further edges that combine the same blocks * e.g.: -------------------- * | | | * (----)------- * | (|) | | * -------------------- * | | | | * -------------------- */ current_edges.push_back(*it); it = edges.erase(it); while (it != edges.end()) { if ((it->blockID0 == block0id && it->blockID1 == block1id) || (it->blockID0 == block1id && it->blockID1 == block0id)) { current_edges.push_back(*it); it = edges.erase(it); } else { ++it; } } } // omp section #pragma omp section { blocks.emplace_back(std::move(block0->join(*block1))); newblock = &blocks.back(); newBlockid = newblock->_id; newblock->_locked = true; } // omp section }; // omp parallel sections #pragma omp parallel for for (unsigned int i = 0; i < edges.size(); ++i) { edge_type& e = edges[i]; if (e.blockID0 == block0id || e.blockID0 == block1id) { e.blockID0 = newBlockid; } else if (e.blockID1 == block0id || e.blockID1 == block1id) { e.blockID1 = newBlockid; } } break; // for edges } // if blocks not 
locked } // for edges } // lock if (current_edges.empty()) { break; } /* * active nodes for potential augmentation on edge */ for (const auto& e: current_edges) { activate_nodes_for_potential_augmentation_on_edge(e); } newblock->run(); #pragma omp critical (edge_lock) { newblock->_locked = false; } #pragma omp critical (gc_progress) { #ifdef BK_EMIT_PROGRESS prog.increment(current_edges.size()); #endif } } // while true } // for threads #pragma omp parallel for for (unsigned int i = 0; i < this->_connected_to_source.size(); ++i) { this->set_source_set(this->_connected_to_source[i]); } #ifdef BK_EMIT_PROGRESS prog.increment(1); #endif #pragma omp parallel for for (unsigned int i = 0; i < this->_connected_to_sink.size(); ++i) { this->set_sink_set(this->_connected_to_sink[i]); } #ifdef BK_EMIT_PROGRESS prog.set_finished(); #endif const std::chrono::system_clock::time_point clock_stop = std::chrono::system_clock::now(); const unsigned int time_in_sec = static_cast<unsigned int>(std::chrono::duration_cast<std::chrono::seconds>(clock_stop - clock_start).count()); const unsigned int time_in_ms = static_cast<unsigned int>(std::chrono::duration_cast<std::chrono::milliseconds>(clock_stop - clock_start).count()); std::cout << "graph cut finished in " << time_in_sec << " s (" << time_in_ms << " ms)" << std::endl; this->_up2date = true; } /// @} }; // class GraphCutParallel } // namespace bk #endif //BKTOOLS_GRAPHCUTPARALLEL_H
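/*
 * Illustrative sketch (hypothetical sizes, plain C rather than the C++ above): the
 * setup in GraphCutParallel::run() amounts to a ceil-division block count per
 * dimension and, per dimension d, (numBlocks[d] - 1) inter-block edges multiplied
 * by the block counts of the remaining dimensions.
 */
#include <stdio.h>

int main(void)
{
  enum { DIMS = 3 };
  const int size[DIMS]       = {100, 60, 37};   /* hypothetical image size  */
  const int block_size[DIMS] = {32, 32, 32};    /* hypothetical block size  */
  int num_blocks[DIMS];
  int num_blocks_total = 1;
  int num_edges_total  = 0;

  for (int d = 0; d < DIMS; ++d) {
    num_blocks[d] = size[d] / block_size[d] + (size[d] % block_size[d] ? 1 : 0);
    num_blocks_total *= num_blocks[d];
  }
  for (int d = 0; d < DIMS; ++d) {
    int edges = num_blocks[d] - 1;              /* edges along dimension d  */
    for (int k = 0; k < DIMS; ++k)
      if (k != d) edges *= num_blocks[k];       /* one per block of other dims */
    num_edges_total += edges;
  }
  printf("blocks: %d, inter-block edges: %d\n", num_blocks_total, num_edges_total);
  return 0;
}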
GB_unop__identity_fc64_int32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_fc64_int32) // op(A') function: GB (_unop_tran__identity_fc64_int32) // C type: GxB_FC64_t // A type: int32_t // cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0) // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_fc64_int32) ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const int32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t aij = Ax [p] ; GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int32_t aij = Ax [p] ; GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_fc64_int32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
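/*
 * Illustrative sketch (standalone, assumes C99 <complex.h>): the kernel above is a
 * pure typecast, Cx[p] = (double complex) Ax[p], i.e. the int32 value becomes the
 * real part and the imaginary part is zero -- the same effect as the
 * GxB_CMPLX((double) aij, 0) cast used in the generated code.
 */
#include <complex.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
  int32_t        Ax[] = {-2, 0, 5};
  double complex Cx[3];

  for (int p = 0; p < 3; p++)
    Cx[p] = (double) Ax[p];        /* imaginary part is implicitly zero */

  for (int p = 0; p < 3; p++)
    printf("Cx[%d] = %g + %gi\n", p, creal(Cx[p]), cimag(Cx[p]));
  return 0;
}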
GB_binop__min_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__min_uint32 // A.*B function (eWiseMult): GB_AemultB__min_uint32 // A*D function (colscale): GB_AxD__min_uint32 // D*A function (rowscale): GB_DxB__min_uint32 // C+=B function (dense accum): GB_Cdense_accumB__min_uint32 // C+=b function (dense accum): GB_Cdense_accumb__min_uint32 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__min_uint32 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__min_uint32 // C=scalar+B GB_bind1st__min_uint32 // C=scalar+B' GB_bind1st_tran__min_uint32 // C=A+scalar GB_bind2nd__min_uint32 // C=A'+scalar GB_bind2nd_tran__min_uint32 // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = GB_IMIN (aij, bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_IMIN (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MIN || GxB_NO_UINT32 || GxB_NO_MIN_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__min_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__min_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__min_uint32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__min_uint32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__min_uint32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__min_uint32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__min_uint32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const 
int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__min_uint32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__min_uint32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = Bx [p] ; Cx [p] = GB_IMIN (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__min_uint32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = Ax [p] ; Cx [p] = GB_IMIN (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = GB_IMIN (x, aij) ; \ } GrB_Info 
GB_bind1st_tran__min_uint32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = GB_IMIN (aij, y) ; \ } GrB_Info GB_bind2nd_tran__min_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
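/*
 * Illustrative sketch (standalone, hypothetical data): the bind1st and bind2nd
 * kernels above differ only in which operand of z = GB_IMIN(x,y) is the bound
 * scalar -- bind1st computes Cx[p] = min(scalar, Bx[p]) and bind2nd computes
 * Cx[p] = min(Ax[p], scalar) -- and both skip entries whose bitmap byte is zero.
 * The loop below mirrors the bind2nd bitmap case without the GraphBLAS scaffolding.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
  uint32_t Ax[] = {7, 3, 9, 1};
  int8_t   Ab[] = {1, 0, 1, 1};    /* bitmap: entry 1 is not present      */
  uint32_t Cx[4] = {0};
  uint32_t y = 5;                  /* bound second operand                */

  #pragma omp parallel for schedule(static)
  for (int p = 0; p < 4; p++) {
    if (!Ab[p]) continue;          /* skip entries absent from the bitmap */
    uint32_t aij = Ax[p];
    Cx[p] = aij < y ? aij : y;     /* GB_IMIN (aij, y)                    */
  }

  for (int p = 0; p < 4; p++)
    if (Ab[p]) printf("Cx[%d] = %u\n", p, Cx[p]);
  return 0;
}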
GB_binop__bget_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bget_uint64 // A.*B function (eWiseMult): GB_AemultB__bget_uint64 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__bget_uint64 // C+=b function (dense accum): GB_Cdense_accumb__bget_uint64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bget_uint64 // C=scalar+B GB_bind1st__bget_uint64 // C=scalar+B' GB_bind1st_tran__bget_uint64 // C=A+scalar GB_bind2nd__bget_uint64 // C=A'+scalar GB_bind2nd_tran__bget_uint64 // C type: uint64_t // A type: uint64_t // B,b type: uint64_t // BinaryOp: cij = GB_BITGET (aij, bij, uint64_t, 64) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_BITGET (x, y, uint64_t, 64) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BGET || GxB_NO_UINT64 || GxB_NO_BGET_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bget_uint64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bget_uint64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bget_uint64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__bget_uint64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t 
*GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bget_uint64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bget_uint64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = Bx [p] ; Cx [p] = GB_BITGET (x, bij, uint64_t, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bget_uint64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = Ax [p] ; Cx [p] = GB_BITGET (aij, y, uint64_t, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = Ax [pA] ; \ Cx [pC] = GB_BITGET (x, aij, 
uint64_t, 64) ; \ } GrB_Info GB_bind1st_tran__bget_uint64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = Ax [pA] ; \ Cx [pC] = GB_BITGET (aij, y, uint64_t, 64) ; \ } GrB_Info GB_bind2nd_tran__bget_uint64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
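The BGET kernels above lean entirely on the GB_BITGET macro. The stand-alone sketch below shows the semantics assumed here (bit positions 1..64, with out-of-range positions yielding 0); bitget_u64 is illustrative and may differ in detail from the real GB_BITGET macro in GraphBLAS.

```c
#include <stdint.h>

/* Illustrative sketch: return bit y of x (1-based, 1 = least significant
 * bit), or 0 if y is outside 1..64.  This mirrors the use of
 * z = GB_BITGET (x, y, uint64_t, 64) in the file above, but it is not the
 * GraphBLAS macro itself. */
static uint64_t bitget_u64 (uint64_t x, uint64_t y)
{
    if (y < 1 || y > 64) return (0) ;
    return ((x >> (y - 1)) & 1) ;
}
```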
GB_binop__first_fc32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__first_fc32 // A.*B function (eWiseMult): GB_AemultB__first_fc32 // A*D function (colscale): GB_AxD__first_fc32 // D*A function (rowscale): GB_DxB__first_fc32 // C+=B function (dense accum): GB_Cdense_accumB__first_fc32 // C+=b function (dense accum): GB_Cdense_accumb__first_fc32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__first_fc32 // C=scalar+B GB_bind1st__first_fc32 // C=scalar+B' GB_bind1st_tran__first_fc32 // C=A+scalar (none) // C=A'+scalar (none) // C type: GxB_FC32_t // A type: GxB_FC32_t // B,b type: GxB_FC32_t // BinaryOp: cij = aij #define GB_ATYPE \ GxB_FC32_t #define GB_BTYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ ; // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = x ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_FIRST || GxB_NO_FC32 || GxB_NO_FIRST_FC32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__first_fc32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__first_fc32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__first_fc32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type GxB_FC32_t GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__first_fc32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *GB_RESTRICT Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__first_fc32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *GB_RESTRICT Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__first_fc32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const 
bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__first_fc32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__first_fc32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ; GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = x ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ; GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC32_t aij = Ax [p] ; Cx [p] = aij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = x ; \ } GrB_Info GB_bind1st_tran__first_fc32 ( GrB_Matrix C, const GB_void 
*x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = Ax [pA] ; \ Cx [pC] = aij ; \ } GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
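For FIRST, cij = aij ignores the second operand, which is why the bind2nd and C=A'+scalar variants are (none) and why GB_bind1st__first_fc32 reduces to broadcasting the bound scalar x. A sketch of that degenerate case follows; fill_with_scalar is illustrative only and again drops the bitmap test and OpenMP pragma of the generated kernel.

```c
#include <complex.h>
#include <stddef.h>

/* Illustrative: with FIRST and the scalar bound as the 1st operand,
 * op (x, bij) = x for every entry, so the apply is just a broadcast. */
static void fill_with_scalar (float complex *Cx, float complex x, size_t anz)
{
    for (size_t p = 0 ; p < anz ; p++)
    {
        Cx [p] = x ;
    }
}
```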
GB_unop__expm1_fc64_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__expm1_fc64_fc64) // op(A') function: GB (_unop_tran__expm1_fc64_fc64) // C type: GxB_FC64_t // A type: GxB_FC64_t // cast: GxB_FC64_t cij = aij // unaryop: cij = GB_cexpm1 (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_cexpm1 (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = aij ; \ Cx [pC] = GB_cexpm1 (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EXPM1 || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__expm1_fc64_fc64) ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = GB_cexpm1 (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = GB_cexpm1 (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__expm1_fc64_fc64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
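The unary kernel above casts and then calls GB_cexpm1. A naive reference for what a complex expm1 computes is sketched below using C99 <complex.h>; the real GB_cexpm1 in GraphBLAS takes more care with accuracy (especially for small |z|), so treat this as an assumption-laden illustration, not the library's implementation.

```c
#include <complex.h>
#include <math.h>

/* Naive complex expm1: exp(z) - 1.  For purely real z, fall back to the
 * accurate real expm1; elsewhere this sketch loses accuracy for tiny |z|. */
static double complex cexpm1_naive (double complex z)
{
    if (cimag (z) == 0)
    {
        return (expm1 (creal (z))) ;
    }
    return (cexp (z) - 1.0) ;
}
```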
t_cholmod_gpu.c
/* ========================================================================== */ /* === GPU/t_cholmod_gpu ==================================================== */ /* ========================================================================== */ /* ----------------------------------------------------------------------------- * CHOLMOD/GPU Module. Copyright (C) 2005-2012, Timothy A. Davis * http://www.suitesparse.com * -------------------------------------------------------------------------- */ /* GPU BLAS template routine for cholmod_super_numeric. */ /* ========================================================================== */ /* === include files and definitions ======================================== */ /* ========================================================================== */ #ifdef GPU_BLAS #include <string.h> #include "cholmod_template.h" #undef L_ENTRY #ifdef REAL #define L_ENTRY 1 #else #define L_ENTRY 2 #endif /* ========================================================================== */ /* === gpu_clear_memory ===================================================== */ /* ========================================================================== */ /* * Ensure the Lx is zeroed before forming factor. This is a significant cost * in the GPU case - so using this parallel memset code for efficiency. */ void TEMPLATE2 (CHOLMOD (gpu_clear_memory)) ( float* buff, size_t size, int num_threads ) { int chunk_multiplier = 5; int num_chunks = chunk_multiplier * num_threads; size_t chunksize = size / num_chunks; size_t i; #pragma omp parallel for num_threads(num_threads) private(i) schedule(dynamic) for(i = 0; i < num_chunks; i++) { size_t chunkoffset = i * chunksize; if(i == num_chunks - 1) { memset(buff + chunkoffset, 0, (size - chunksize*(num_chunks - 1)) * sizeof(float)); } else { memset(buff + chunkoffset, 0, chunksize * sizeof(float)); } } } /* ========================================================================== */ /* === gpu_init ============================================================= */ /* ========================================================================== */ /* * Performs required initialization for GPU computing. * * Returns 0 if there is an error, so the intended use is * * useGPU = CHOLMOD(gpu_init) * * which would locally turn off gpu processing if the initialization failed. */ int TEMPLATE2 (CHOLMOD (gpu_init)) ( void *Cwork, cholmod_factor *L, cholmod_common *Common, Int nsuper, Int n, Int nls, cholmod_gpu_pointers *gpu_p ) { Int i, k, maxSize ; cublasStatus_t cublasError ; cudaError_t cudaErr ; size_t maxBytesSize, HostPinnedSize ; feenableexcept (FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW ); maxSize = L->maxcsize; /* #define PAGE_SIZE (4*1024) */ CHOLMOD_GPU_PRINTF (("gpu_init : %p\n", (void *) ((size_t) Cwork & ~(4*1024-1)))) ; /* make sure the assumed buffer sizes are large enough */ if ( (nls+2*n+4)*sizeof(Int) > Common->devBuffSize ) { ERROR (CHOLMOD_GPU_PROBLEM,"\n\n" "GPU Memory allocation error. Ls, Map and RelativeMap exceed\n" "devBuffSize. It is not clear if this is due to insufficient\n" "device or host memory or both. 
You can try:\n" " 1) increasing the amount of GPU memory requested\n" " 2) reducing CHOLMOD_NUM_HOST_BUFFERS\n" " 3) using a GPU & host with more memory\n" "This issue is a known limitation and should be fixed in a \n" "future release of CHOLMOD.\n") ; return (0) ; } /* divvy up the memory in dev_mempool */ gpu_p->d_Lx[0] = Common->dev_mempool; gpu_p->d_Lx[1] = Common->dev_mempool + Common->devBuffSize; gpu_p->d_C = Common->dev_mempool + 2*Common->devBuffSize; gpu_p->d_A[0] = Common->dev_mempool + 3*Common->devBuffSize; gpu_p->d_A[1] = Common->dev_mempool + 4*Common->devBuffSize; gpu_p->d_Ls = Common->dev_mempool + 5*Common->devBuffSize; gpu_p->d_Map = gpu_p->d_Ls + (nls+1)*sizeof(Int) ; gpu_p->d_RelativeMap = gpu_p->d_Map + (n+1)*sizeof(Int) ; /* Copy all of the Ls and Lpi data to the device. If any supernodes are * to be computed on the device then this will be needed, so might as * well do it now. */ cudaErr = cudaMemcpy ( gpu_p->d_Ls, L->s, nls*sizeof(Int), cudaMemcpyHostToDevice ); CHOLMOD_HANDLE_CUDA_ERROR(cudaErr,"cudaMemcpy(d_Ls)"); if (!(Common->gpuStream[0])) { /* ------------------------------------------------------------------ */ /* create each CUDA stream */ /* ------------------------------------------------------------------ */ for ( i=0; i<CHOLMOD_HOST_SUPERNODE_BUFFERS; i++ ) { cudaErr = cudaStreamCreate ( &(Common->gpuStream[i]) ); if (cudaErr != cudaSuccess) { ERROR (CHOLMOD_GPU_PROBLEM, "CUDA stream") ; return (0) ; } } /* ------------------------------------------------------------------ */ /* create each CUDA event */ /* ------------------------------------------------------------------ */ for (i = 0 ; i < 3 ; i++) { cudaErr = cudaEventCreateWithFlags (&(Common->cublasEventPotrf [i]), cudaEventDisableTiming) ; if (cudaErr != cudaSuccess) { ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event") ; return (0) ; } } for (i = 0 ; i < CHOLMOD_HOST_SUPERNODE_BUFFERS ; i++) { cudaErr = cudaEventCreateWithFlags (&(Common->updateCBuffersFree[i]), cudaEventDisableTiming) ; if (cudaErr != cudaSuccess) { ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event") ; return (0) ; } } cudaErr = cudaEventCreateWithFlags ( &(Common->updateCKernelsComplete), cudaEventDisableTiming ); if (cudaErr != cudaSuccess) { ERROR (CHOLMOD_GPU_PROBLEM, "CUDA updateCKernelsComplete event") ; return (0) ; } } gpu_p->h_Lx[0] = (float*)(Common->host_pinned_mempool); for ( k=1; k<CHOLMOD_HOST_SUPERNODE_BUFFERS; k++ ) { gpu_p->h_Lx[k] = (float*)((char *)(Common->host_pinned_mempool) + k*Common->devBuffSize); } return (1); /* initialization successfull, useGPU = 1 */ } /* ========================================================================== */ /* === gpu_reorder_descendants ============================================== */ /* ========================================================================== */ /* Reorder the descendant supernodes as: * 1st - descendant supernodes eligible for processing on the GPU * in increasing (by flops) order * 2nd - supernodes whose processing is to remain on the CPU * in any order * * All of the GPU-eligible supernodes will be scheduled first. All * CPU-eligible descendants will overlap with the last (largest) * CHOLMOD_HOST_SUPERNODE_BUFFERS GPU-eligible descendants. 
*/ void TEMPLATE2 (CHOLMOD (gpu_reorder_descendants)) ( cholmod_common *Common, Int *Super, Int *locals, Int *Lpi, Int *Lpos, Int *Head, Int *Next, Int *Previous, Int *ndescendants, Int *tail, Int *mapCreatedOnGpu, cholmod_gpu_pointers *gpu_p ) { Int prevd, nextd, firstcpu, d, k, kd1, kd2, ndcol, pdi, pdend, pdi1; Int dnext, ndrow2, p; Int n_descendant = 0; float score; /* use h_Lx[0] to buffer the GPU-eligible descendants */ struct cholmod_descendant_score_t* scores = (struct cholmod_descendant_score_t*) gpu_p->h_Lx[0]; float cpuref = 0.0; int nreverse = 1; int previousd; d = Head[*locals]; prevd = -1; firstcpu = -1; *mapCreatedOnGpu = 0; while ( d != EMPTY ) { /* Get the parameters for the current descendant supernode */ kd1 = Super [d] ; /* d contains cols kd1 to kd2-1 of L */ kd2 = Super [d+1] ; ndcol = kd2 - kd1 ; /* # of columns in all of d */ pdi = Lpi [d] ; /* pointer to first row of d in Ls */ pdend = Lpi [d+1] ; /* pointer just past last row of d in Ls */ p = Lpos [d] ; /* offset of 1st row of d affecting s */ pdi1 = pdi + p ; /* ptr to 1st row of d affecting s in Ls */ ndrow2 = pdend - pdi1; nextd = Next[d]; /* compute a rough flops 'score' for this descendant supernode */ score = ndrow2 * ndcol; if ( ndrow2*L_ENTRY >= CHOLMOD_ND_ROW_LIMIT && ndcol*L_ENTRY >= CHOLMOD_ND_COL_LIMIT ) { score += Common->devBuffSize; } /* place in sort buffer */ scores[n_descendant].score = score; scores[n_descendant].d = d; n_descendant++; d = nextd; } /* Sort the GPU-eligible supernodes */ qsort ( scores, n_descendant, sizeof(struct cholmod_descendant_score_t), (__compar_fn_t) CHOLMOD(score_comp) ); /* Place sorted data back in descendant supernode linked list*/ if ( n_descendant > 0 ) { Head[*locals] = scores[0].d; if ( n_descendant > 1 ) { #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \ if (n_descendant > 64) for ( k=1; k<n_descendant; k++ ) { Next[scores[k-1].d] = scores[k].d; } } Next[scores[n_descendant-1].d] = firstcpu; } /* reverse the first CHOLMOD_HOST_SUPERNODE_BUFFERS to better hide PCIe communications */ if ( Head[*locals] != EMPTY && Next[Head[*locals]] != EMPTY ) { previousd = Head[*locals]; d = Next[Head[*locals]]; while ( d!=EMPTY && nreverse < CHOLMOD_HOST_SUPERNODE_BUFFERS ) { kd1 = Super [d] ; /* d contains cols kd1 to kd2-1 of L */ kd2 = Super [d+1] ; ndcol = kd2 - kd1 ; /* # of columns in all of d */ pdi = Lpi [d] ; /* pointer to first row of d in Ls */ pdend = Lpi [d+1] ; /* pointer just past last row of d in Ls */ p = Lpos [d] ; /* offset of 1st row of d affecting s */ pdi1 = pdi + p ; /* ptr to 1st row of d affecting s in Ls */ ndrow2 = pdend - pdi1; nextd = Next[d]; nreverse++; if ( ndrow2*L_ENTRY >= CHOLMOD_ND_ROW_LIMIT && ndcol*L_ENTRY >= CHOLMOD_ND_COL_LIMIT ) { /* place this supernode at the front of the list */ Next[previousd] = Next[d]; Next[d] = Head[*locals]; Head[*locals] = d; } else { previousd = d; } d = nextd; } } /* create a 'previous' list so we can traverse backwards */ *ndescendants = 0; if ( Head[*locals] != EMPTY ) { Previous[Head[*locals]] = EMPTY; for (d = Head [*locals] ; d != EMPTY ; d = dnext) { (*ndescendants)++; dnext = Next[d]; if ( dnext != EMPTY ) { Previous[dnext] = d; } else { *tail = d; } } } return; } /* ========================================================================== */ /* === gpu_initialize_supernode ============================================= */ /* ========================================================================== */ /* C = L (k1:n-1, kd1:kd2-1) * L (k1:k2-1, kd1:kd2-1)', except that k1:n-1 */ void 
TEMPLATE2 (CHOLMOD (gpu_initialize_supernode)) ( cholmod_common *Common, Int nscol, Int nsrow, Int psi, cholmod_gpu_pointers *gpu_p ) { cudaError_t cuErr; /* initialize the device supernode assemby memory to zero */ cuErr = cudaMemset ( gpu_p->d_A[0], 0, nscol*nsrow*L_ENTRY*sizeof(float) ); CHOLMOD_HANDLE_CUDA_ERROR(cuErr,"cudaMemset(d_A)"); /* Create the Map on the device */ createMapOnDevice ( (Int *)(gpu_p->d_Map), (Int *)(gpu_p->d_Ls), psi, nsrow ); return; } /* ========================================================================== */ /* === gpu_updateC ========================================================== */ /* ========================================================================== */ /* C = L (k1:n-1, kd1:kd2-1) * L (k1:k2-1, kd1:kd2-1)', except that k1:n-1 * refers to all of the rows in L, but many of the rows are all zero. * Supernode d holds columns kd1 to kd2-1 of L. Nonzero rows in the range * k1:k2-1 are in the list Ls [pdi1 ... pdi2-1], of size ndrow1. Nonzero rows * in the range k2:n-1 are in the list Ls [pdi2 ... pdend], of size ndrow2. * Let L1 = L (Ls [pdi1 ... pdi2-1], kd1:kd2-1), and let L2 = L (Ls [pdi2 ... * pdend], kd1:kd2-1). C is ndrow2-by-ndrow1. Let C1 be the first ndrow1 * rows of C and let C2 be the last ndrow2-ndrow1 rows of C. Only the lower * triangular part of C1 needs to be computed since C1 is symmetric. * * UpdateC is completely asynchronous w.r.t. the GPU. Once the input buffer * d_Lx[] has been filled, all of the device operations are issues, and the * host can continue with filling the next input buffer / or start processing * all of the descendant supernodes which are not eligible for processing on * the device (since they are too small - will not fill the device). */ int TEMPLATE2 (CHOLMOD (gpu_updateC)) ( Int ndrow1, /* C is ndrow2-by-ndrow2 */ Int ndrow2, Int ndrow, /* leading dimension of Lx */ Int ndcol, /* L1 is ndrow1-by-ndcol */ Int nsrow, Int pdx1, /* L1 starts at Lx + L_ENTRY*pdx1 */ /* L2 starts at Lx + L_ENTRY*(pdx1 + ndrow1) */ Int pdi1, float *Lx, float *C, cholmod_common *Common, cholmod_gpu_pointers *gpu_p ) { float *devPtrLx, *devPtrC ; float alpha, beta ; cublasStatus_t cublasStatus ; cudaError_t cudaStat [2] ; Int ndrow3 ; int icol, irow; int iHostBuff, iDevBuff ; #ifndef NTIMER float tstart = 0; #endif if ((ndrow2*L_ENTRY < CHOLMOD_ND_ROW_LIMIT) || (ndcol*L_ENTRY < CHOLMOD_ND_COL_LIMIT)) { /* too small for the CUDA BLAS; use the CPU instead */ return (0) ; } ndrow3 = ndrow2 - ndrow1 ; #ifndef NTIMER Common->syrkStart = SuiteSparse_time ( ) ; Common->CHOLMOD_GPU_SYRK_CALLS++ ; #endif /* ---------------------------------------------------------------------- */ /* allocate workspace on the GPU */ /* ---------------------------------------------------------------------- */ iHostBuff = (Common->ibuffer)%CHOLMOD_HOST_SUPERNODE_BUFFERS; iDevBuff = (Common->ibuffer)%CHOLMOD_DEVICE_STREAMS; /* cycle the device Lx buffer, d_Lx, through CHOLMOD_DEVICE_STREAMS, usually 2, so we can overlap the copy of this descendent supernode with the compute of the previous descendant supernode */ devPtrLx = (float *)(gpu_p->d_Lx[iDevBuff]); /* very little overlap between kernels for difference descendant supernodes (since we enforce the supernodes must be large enough to fill the device) so we only need one C buffer */ devPtrC = (float *)(gpu_p->d_C); /* ---------------------------------------------------------------------- */ /* copy Lx to the GPU */ /* ---------------------------------------------------------------------- */ /* copy host data 
to pinned buffer first for better H2D bandwidth */ #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) if (ndcol > 32) for ( icol=0; icol<ndcol; icol++ ) { for ( irow=0; irow<ndrow2*L_ENTRY; irow++ ) { gpu_p->h_Lx[iHostBuff][icol*ndrow2*L_ENTRY+irow] = Lx[pdx1*L_ENTRY+icol*ndrow*L_ENTRY + irow]; } } cudaStat[0] = cudaMemcpyAsync ( devPtrLx, gpu_p->h_Lx[iHostBuff], ndrow2*ndcol*L_ENTRY*sizeof(devPtrLx[0]), cudaMemcpyHostToDevice, Common->gpuStream[iDevBuff] ); if ( cudaStat[0] ) { CHOLMOD_GPU_PRINTF ((" ERROR cudaMemcpyAsync = %d \n", cudaStat[0])); return (0); } /* make the current stream wait for kernels in previous streams */ cudaStreamWaitEvent ( Common->gpuStream[iDevBuff], Common->updateCKernelsComplete, 0 ) ; /* ---------------------------------------------------------------------- */ /* create the relative map for this descendant supernode */ /* ---------------------------------------------------------------------- */ createRelativeMapOnDevice ( (Int *)(gpu_p->d_Map), (Int *)(gpu_p->d_Ls), (Int *)(gpu_p->d_RelativeMap), pdi1, ndrow2, &(Common->gpuStream[iDevBuff]) ); /* ---------------------------------------------------------------------- */ /* do the CUDA SYRK */ /* ---------------------------------------------------------------------- */ cublasStatus = cublasSetStream (Common->cublasHandle, Common->gpuStream[iDevBuff]) ; if (cublasStatus != CUBLAS_STATUS_SUCCESS) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS stream") ; } alpha = 1.0 ; beta = 0.0 ; #ifdef REAL cublasStatus = cublasDsyrk (Common->cublasHandle, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, (int) ndrow1, (int) ndcol, /* N, K: L1 is ndrow1-by-ndcol */ &alpha, /* ALPHA: 1 */ devPtrLx, ndrow2, /* A, LDA: L1, ndrow2 */ &beta, /* BETA: 0 */ devPtrC, ndrow2) ; /* C, LDC: C1 */ #else cublasStatus = cublasZherk (Common->cublasHandle, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, (int) ndrow1, (int) ndcol, /* N, K: L1 is ndrow1-by-ndcol*/ &alpha, /* ALPHA: 1 */ (const cuDoubleComplex *) devPtrLx, ndrow2, /* A, LDA: L1, ndrow2 */ &beta, /* BETA: 0 */ (cuDoubleComplex *) devPtrC, ndrow2) ; /* C, LDC: C1 */ #endif if (cublasStatus != CUBLAS_STATUS_SUCCESS) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ; } #ifndef NTIMER Common->CHOLMOD_GPU_SYRK_TIME += SuiteSparse_time() - Common->syrkStart; #endif /* ---------------------------------------------------------------------- */ /* compute remaining (ndrow2-ndrow1)-by-ndrow1 block of C, C2 = L2*L1' */ /* ---------------------------------------------------------------------- */ #ifndef NTIMER Common->CHOLMOD_GPU_GEMM_CALLS++ ; tstart = SuiteSparse_time(); #endif if (ndrow3 > 0) { #ifndef REAL cuDoubleComplex calpha = {1.0,0.0} ; cuDoubleComplex cbeta = {0.0,0.0} ; #endif /* ------------------------------------------------------------------ */ /* do the CUDA BLAS dgemm */ /* ------------------------------------------------------------------ */ #ifdef REAL alpha = 1.0 ; beta = 0.0 ; cublasStatus = cublasDgemm (Common->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, ndrow3, ndrow1, ndcol, /* M, N, K */ &alpha, /* ALPHA: 1 */ devPtrLx + L_ENTRY*(ndrow1), /* A, LDA: L2*/ ndrow2, /* ndrow */ devPtrLx, /* B, LDB: L1 */ ndrow2, /* ndrow */ &beta, /* BETA: 0 */ devPtrC + L_ENTRY*ndrow1, /* C, LDC: C2 */ ndrow2) ; #else cublasStatus = cublasZgemm (Common->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_C, ndrow3, ndrow1, ndcol, /* M, N, K */ &calpha, /* ALPHA: 1 */ (const cuDoubleComplex*) devPtrLx + ndrow1, ndrow2, /* ndrow */ (const cuDoubleComplex *) devPtrLx, ndrow2, /* ndrow */ &cbeta, /* BETA: 0 */ 
(cuDoubleComplex *)devPtrC + ndrow1, ndrow2) ; #endif if (cublasStatus != CUBLAS_STATUS_SUCCESS) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ; } } #ifndef NTIMER Common->CHOLMOD_GPU_GEMM_TIME += SuiteSparse_time() - tstart; #endif /* ------------------------------------------------------------------ */ /* Assemble the update C on the device using the d_RelativeMap */ /* ------------------------------------------------------------------ */ #ifdef REAL addUpdateOnDevice ( gpu_p->d_A[0], devPtrC, gpu_p->d_RelativeMap, ndrow1, ndrow2, nsrow, &(Common->gpuStream[iDevBuff]) ); #else addComplexUpdateOnDevice ( gpu_p->d_A[0], devPtrC, gpu_p->d_RelativeMap, ndrow1, ndrow2, nsrow, &(Common->gpuStream[iDevBuff]) ); #endif /* Record an event indicating that kernels for this descendant are complete */ cudaEventRecord ( Common->updateCKernelsComplete, Common->gpuStream[iDevBuff]); cudaEventRecord ( Common->updateCBuffersFree[iHostBuff], Common->gpuStream[iDevBuff]); return (1) ; } /* ========================================================================== */ /* === gpu_final_assembly =================================================== */ /* ========================================================================== */ /* If the supernode was assembled on both the CPU and the GPU, this will * complete the supernode assembly on both the GPU and CPU. */ void TEMPLATE2 (CHOLMOD (gpu_final_assembly)) ( cholmod_common *Common, float *Lx, Int psx, Int nscol, Int nsrow, int supernodeUsedGPU, int *iHostBuff, int *iDevBuff, cholmod_gpu_pointers *gpu_p ) { Int iidx, i, j; Int iHostBuff2 ; Int iDevBuff2 ; if ( supernodeUsedGPU ) { /* ------------------------------------------------------------------ */ /* Apply all of the Shur-complement updates, computed on the gpu, to */ /* the supernode. */ /* ------------------------------------------------------------------ */ *iHostBuff = (Common->ibuffer)%CHOLMOD_HOST_SUPERNODE_BUFFERS; *iDevBuff = (Common->ibuffer)%CHOLMOD_DEVICE_STREAMS; if ( nscol * L_ENTRY >= CHOLMOD_POTRF_LIMIT ) { /* If this supernode is going to be factored using the GPU (potrf) * then it will need the portion of the update assembled ont the * CPU. So copy that to a pinned buffer an H2D copy to device. 
*/ /* wait until a buffer is free */ cudaEventSynchronize ( Common->updateCBuffersFree[*iHostBuff] ); /* copy update assembled on CPU to a pinned buffer */ #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \ private(iidx) if (nscol>32) for ( j=0; j<nscol; j++ ) { for ( i=j; i<nsrow*L_ENTRY; i++ ) { iidx = j*nsrow*L_ENTRY+i; gpu_p->h_Lx[*iHostBuff][iidx] = Lx[psx*L_ENTRY+iidx]; } } /* H2D transfer of update assembled on CPU */ cudaMemcpyAsync ( gpu_p->d_A[1], gpu_p->h_Lx[*iHostBuff], nscol*nsrow*L_ENTRY*sizeof(float), cudaMemcpyHostToDevice, Common->gpuStream[*iDevBuff] ); } Common->ibuffer++; iHostBuff2 = (Common->ibuffer)%CHOLMOD_HOST_SUPERNODE_BUFFERS; iDevBuff2 = (Common->ibuffer)%CHOLMOD_DEVICE_STREAMS; /* wait for all kernels to complete */ cudaEventSynchronize( Common->updateCKernelsComplete ); /* copy assembled Schur-complement updates computed on GPU */ cudaMemcpyAsync ( gpu_p->h_Lx[iHostBuff2], gpu_p->d_A[0], nscol*nsrow*L_ENTRY*sizeof(float), cudaMemcpyDeviceToHost, Common->gpuStream[iDevBuff2] ); if ( nscol * L_ENTRY >= CHOLMOD_POTRF_LIMIT ) { /* with the current implementation, potrf still uses data from the * CPU - so put the fully assembled supernode in a pinned buffer for * fastest access */ /* need both H2D and D2H copies to be complete */ cudaDeviceSynchronize(); /* sum updates from cpu and device on device */ #ifdef REAL sumAOnDevice ( gpu_p->d_A[1], gpu_p->d_A[0], -1.0, nsrow, nscol ); #else sumComplexAOnDevice ( gpu_p->d_A[1], gpu_p->d_A[0], -1.0, nsrow, nscol ); #endif /* place final assembled supernode in pinned buffer */ #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \ private(iidx) if (nscol>32) for ( j=0; j<nscol; j++ ) { for ( i=j*L_ENTRY; i<nscol*L_ENTRY; i++ ) { iidx = j*nsrow*L_ENTRY+i; gpu_p->h_Lx[*iHostBuff][iidx] -= gpu_p->h_Lx[iHostBuff2][iidx]; } } } else { /* assemble with CPU updates */ cudaDeviceSynchronize(); #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \ private(iidx) if (nscol>32) for ( j=0; j<nscol; j++ ) { for ( i=j*L_ENTRY; i<nsrow*L_ENTRY; i++ ) { iidx = j*nsrow*L_ENTRY+i; Lx[psx*L_ENTRY+iidx] -= gpu_p->h_Lx[iHostBuff2][iidx]; } } } } return; } /* ========================================================================== */ /* === gpu_lower_potrf ====================================================== */ /* ========================================================================== */ /* Cholesky factorzation (dpotrf) of a matrix S, operating on the lower * triangular part only. S is nscol2-by-nscol2 with leading dimension nsrow. * * S is the top part of the supernode (the lower triangular matrx). * This function also copies the bottom rectangular part of the supernode (B) * onto the GPU, in preparation for gpu_triangular_solve. 
*/ /* * On entry, d_A[1] contains the fully assembled supernode */ int TEMPLATE2 (CHOLMOD (gpu_lower_potrf)) ( Int nscol2, /* S is nscol2-by-nscol2 */ Int nsrow, /* leading dimension of S */ Int psx, /* S is located at Lx + L_ENTRY*psx */ float *Lx, /* contains S; overwritten with Cholesky factor */ Int *info, /* BLAS info return value */ cholmod_common *Common, cholmod_gpu_pointers *gpu_p ) { float *devPtrA, *devPtrB, *A ; float alpha, beta ; cudaError_t cudaStat ; cublasStatus_t cublasStatus ; Int j, nsrow2, nb, n, gpu_lda, lda, gpu_ldb ; int ilda, ijb, iinfo ; #ifndef NTIMER float tstart ; #endif if (nscol2 * L_ENTRY < CHOLMOD_POTRF_LIMIT) { /* too small for the CUDA BLAS; use the CPU instead */ return (0) ; } #ifndef NTIMER tstart = SuiteSparse_time ( ) ; Common->CHOLMOD_GPU_POTRF_CALLS++ ; #endif nsrow2 = nsrow - nscol2 ; /* ---------------------------------------------------------------------- */ /* heuristic to get the block size depending of the problem size */ /* ---------------------------------------------------------------------- */ nb = 128 ; if (nscol2 > 4096) nb = 256 ; if (nscol2 > 8192) nb = 384 ; n = nscol2 ; gpu_lda = ((nscol2+31)/32)*32 ; lda = nsrow ; A = gpu_p->h_Lx[(Common->ibuffer+CHOLMOD_HOST_SUPERNODE_BUFFERS-1)% CHOLMOD_HOST_SUPERNODE_BUFFERS]; /* ---------------------------------------------------------------------- */ /* determine the GPU leading dimension of B */ /* ---------------------------------------------------------------------- */ gpu_ldb = 0 ; if (nsrow2 > 0) { gpu_ldb = ((nsrow2+31)/32)*32 ; } /* ---------------------------------------------------------------------- */ /* remember where device memory is, to be used by triangular solve later */ /* ---------------------------------------------------------------------- */ devPtrA = gpu_p->d_Lx[0]; devPtrB = gpu_p->d_Lx[1]; /* ---------------------------------------------------------------------- */ /* copy A from device to device */ /* ---------------------------------------------------------------------- */ cudaStat = cudaMemcpy2DAsync ( devPtrA, gpu_lda * L_ENTRY * sizeof (devPtrA[0]), gpu_p->d_A[1], nsrow * L_ENTRY * sizeof (Lx[0]), nscol2 * L_ENTRY * sizeof (devPtrA[0]), nscol2, cudaMemcpyDeviceToDevice, Common->gpuStream[0] ); if ( cudaStat ) { ERROR ( CHOLMOD_GPU_PROBLEM, "GPU memcopy device to device"); } /* ---------------------------------------------------------------------- */ /* copy B in advance, for gpu_triangular_solve */ /* ---------------------------------------------------------------------- */ if (nsrow2 > 0) { cudaStat = cudaMemcpy2DAsync (devPtrB, gpu_ldb * L_ENTRY * sizeof (devPtrB [0]), gpu_p->d_A[1] + L_ENTRY*nscol2, nsrow * L_ENTRY * sizeof (Lx [0]), nsrow2 * L_ENTRY * sizeof (devPtrB [0]), nscol2, cudaMemcpyDeviceToDevice, Common->gpuStream[0]) ; if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ; } } /* ------------------------------------------------------------------ */ /* define the dpotrf stream */ /* ------------------------------------------------------------------ */ cublasStatus = cublasSetStream (Common->cublasHandle, Common->gpuStream [0]) ; if (cublasStatus != CUBLAS_STATUS_SUCCESS) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS stream") ; } /* ---------------------------------------------------------------------- */ /* block Cholesky factorization of S */ /* ---------------------------------------------------------------------- */ for (j = 0 ; j < n ; j += nb) { Int jb = nb < (n-j) ? 
nb : (n-j) ; /* ------------------------------------------------------------------ */ /* do the CUDA BLAS dsyrk */ /* ------------------------------------------------------------------ */ alpha = -1.0 ; beta = 1.0 ; #ifdef REAL cublasStatus = cublasDsyrk (Common->cublasHandle, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, jb, j, &alpha, devPtrA + j, gpu_lda, &beta, devPtrA + j + j*gpu_lda, gpu_lda) ; #else cublasStatus = cublasZherk (Common->cublasHandle, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, jb, j, &alpha, (cuDoubleComplex*)devPtrA + j, gpu_lda, &beta, (cuDoubleComplex*)devPtrA + j + j*gpu_lda, gpu_lda) ; #endif if (cublasStatus != CUBLAS_STATUS_SUCCESS) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ; } /* ------------------------------------------------------------------ */ cudaStat = cudaEventRecord (Common->cublasEventPotrf [0], Common->gpuStream [0]) ; if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ; } cudaStat = cudaStreamWaitEvent (Common->gpuStream [1], Common->cublasEventPotrf [0], 0) ; if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ; } /* ------------------------------------------------------------------ */ /* copy back the jb columns on two different streams */ /* ------------------------------------------------------------------ */ cudaStat = cudaMemcpy2DAsync (A + L_ENTRY*(j + j*lda), lda * L_ENTRY * sizeof (float), devPtrA + L_ENTRY*(j + j*gpu_lda), gpu_lda * L_ENTRY * sizeof (float), L_ENTRY * sizeof (float)*jb, jb, cudaMemcpyDeviceToHost, Common->gpuStream [1]) ; if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy from device") ; } /* ------------------------------------------------------------------ */ /* do the CUDA BLAS dgemm */ /* ------------------------------------------------------------------ */ if ((j+jb) < n) { #ifdef REAL alpha = -1.0 ; beta = 1.0 ; cublasStatus = cublasDgemm (Common->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, (n-j-jb), jb, j, &alpha, devPtrA + (j+jb), gpu_lda, devPtrA + (j) , gpu_lda, &beta, devPtrA + (j+jb + j*gpu_lda), gpu_lda) ; #else cuDoubleComplex calpha = {-1.0,0.0} ; cuDoubleComplex cbeta = { 1.0,0.0} ; cublasStatus = cublasZgemm (Common->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_C, (n-j-jb), jb, j, &calpha, (cuDoubleComplex*)devPtrA + (j+jb), gpu_lda, (cuDoubleComplex*)devPtrA + (j), gpu_lda, &cbeta, (cuDoubleComplex*)devPtrA + (j+jb + j*gpu_lda), gpu_lda ) ; #endif if (cublasStatus != CUBLAS_STATUS_SUCCESS) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ; } } cudaStat = cudaStreamSynchronize (Common->gpuStream [1]) ; if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ; } /* ------------------------------------------------------------------ */ /* compute the Cholesky factorization of the jbxjb block on the CPU */ /* ------------------------------------------------------------------ */ ilda = (int) lda ; ijb = jb ; #ifdef REAL LAPACK_DPOTRF ("L", &ijb, A + L_ENTRY * (j + j*lda), &ilda, &iinfo) ; #else LAPACK_ZPOTRF ("L", &ijb, A + L_ENTRY * (j + j*lda), &ilda, &iinfo) ; #endif *info = iinfo ; if (*info != 0) { *info = *info + j ; break ; } /* ------------------------------------------------------------------ */ /* copy the result back to the GPU */ /* ------------------------------------------------------------------ */ cudaStat = cudaMemcpy2DAsync (devPtrA + L_ENTRY*(j + j*gpu_lda), gpu_lda * L_ENTRY * sizeof (float), A + L_ENTRY * (j + j*lda), lda * L_ENTRY * sizeof (float), L_ENTRY * sizeof (float) * jb, jb, cudaMemcpyHostToDevice, Common->gpuStream [0]) ; if 
(cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ; } /* ------------------------------------------------------------------ */ /* do the CUDA BLAS dtrsm */ /* ------------------------------------------------------------------ */ if ((j+jb) < n) { #ifdef REAL alpha = 1.0 ; cublasStatus = cublasDtrsm (Common->cublasHandle, CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_T, CUBLAS_DIAG_NON_UNIT, (n-j-jb), jb, &alpha, devPtrA + (j + j*gpu_lda), gpu_lda, devPtrA + (j+jb + j*gpu_lda), gpu_lda) ; #else cuDoubleComplex calpha = {1.0,0.0}; cublasStatus = cublasZtrsm (Common->cublasHandle, CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_C, CUBLAS_DIAG_NON_UNIT, (n-j-jb), jb, &calpha, (cuDoubleComplex *)devPtrA + (j + j*gpu_lda), gpu_lda, (cuDoubleComplex *)devPtrA + (j+jb + j*gpu_lda), gpu_lda) ; #endif if (cublasStatus != CUBLAS_STATUS_SUCCESS) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ; } /* -------------------------------------------------------------- */ /* Copy factored column back to host. */ /* -------------------------------------------------------------- */ cudaStat = cudaEventRecord (Common->cublasEventPotrf[2], Common->gpuStream[0]) ; if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ; } cudaStat = cudaStreamWaitEvent (Common->gpuStream[1], Common->cublasEventPotrf[2], 0) ; if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ; } cudaStat = cudaMemcpy2DAsync (A + L_ENTRY*(j + jb + j * lda), lda * L_ENTRY * sizeof (float), devPtrA + L_ENTRY* (j + jb + j * gpu_lda), gpu_lda * L_ENTRY * sizeof (float), L_ENTRY * sizeof (float)* (n - j - jb), jb, cudaMemcpyDeviceToHost, Common->gpuStream[1]) ; if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ; } } } #ifndef NTIMER Common->CHOLMOD_GPU_POTRF_TIME += SuiteSparse_time ( ) - tstart ; #endif return (1) ; } /* ========================================================================== */ /* === gpu_triangular_solve ================================================= */ /* ========================================================================== */ /* The current supernode is columns k1 to k2-1 of L. Let L1 be the diagonal * block (factorized by dpotrf/zpotrf above; rows/cols k1:k2-1), and L2 be rows * k2:n-1 and columns k1:k2-1 of L. The triangular system to solve is L2*L1' = * S2, where S2 is overwritten with L2. More precisely, L2 = S2 / L1' in * MATLAB notation. 
*/ /* Version with pre-allocation in POTRF */ int TEMPLATE2 (CHOLMOD (gpu_triangular_solve)) ( Int nsrow2, /* L1 and S2 are nsrow2-by-nscol2 */ Int nscol2, /* L1 is nscol2-by-nscol2 */ Int nsrow, /* leading dimension of L1, L2, and S2 */ Int psx, /* L1 is at Lx+L_ENTRY*psx; * L2 at Lx+L_ENTRY*(psx+nscol2)*/ float *Lx, /* holds L1, L2, and S2 */ cholmod_common *Common, cholmod_gpu_pointers *gpu_p ) { float *devPtrA, *devPtrB ; cudaError_t cudaStat ; cublasStatus_t cublasStatus ; Int gpu_lda, gpu_ldb, gpu_rowstep ; Int gpu_row_start = 0 ; Int gpu_row_max_chunk, gpu_row_chunk; int ibuf = 0; int iblock = 0; int iHostBuff = (Common->ibuffer+CHOLMOD_HOST_SUPERNODE_BUFFERS-1) % CHOLMOD_HOST_SUPERNODE_BUFFERS; int i, j; Int iidx; int iwrap; #ifndef NTIMER float tstart ; #endif #ifdef REAL float alpha = 1.0 ; gpu_row_max_chunk = 768; #else cuDoubleComplex calpha = {1.0,0.0} ; gpu_row_max_chunk = 256; #endif if ( nsrow2 <= 0 ) { return (0) ; } #ifndef NTIMER tstart = SuiteSparse_time ( ) ; Common->CHOLMOD_GPU_TRSM_CALLS++ ; #endif gpu_lda = ((nscol2+31)/32)*32 ; gpu_ldb = ((nsrow2+31)/32)*32 ; devPtrA = gpu_p->d_Lx[0]; devPtrB = gpu_p->d_Lx[1]; /* make sure the copy of B has completed */ cudaStreamSynchronize( Common->gpuStream[0] ); /* ---------------------------------------------------------------------- */ /* do the CUDA BLAS dtrsm */ /* ---------------------------------------------------------------------- */ while ( gpu_row_start < nsrow2 ) { gpu_row_chunk = nsrow2 - gpu_row_start; if ( gpu_row_chunk > gpu_row_max_chunk ) { gpu_row_chunk = gpu_row_max_chunk; } cublasStatus = cublasSetStream ( Common->cublasHandle, Common->gpuStream[ibuf] ); if ( cublasStatus != CUBLAS_STATUS_SUCCESS ) { ERROR ( CHOLMOD_GPU_PROBLEM, "GPU CUBLAS stream"); } #ifdef REAL cublasStatus = cublasDtrsm (Common->cublasHandle, CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_T, CUBLAS_DIAG_NON_UNIT, gpu_row_chunk, nscol2, &alpha, devPtrA, gpu_lda, devPtrB + gpu_row_start, gpu_ldb) ; #else cublasStatus = cublasZtrsm (Common->cublasHandle, CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_C, CUBLAS_DIAG_NON_UNIT, gpu_row_chunk, nscol2, &calpha, (const cuDoubleComplex *) devPtrA, gpu_lda, (cuDoubleComplex *)devPtrB + gpu_row_start , gpu_ldb) ; #endif if (cublasStatus != CUBLAS_STATUS_SUCCESS) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ; } /* ------------------------------------------------------------------ */ /* copy result back to the CPU */ /* ------------------------------------------------------------------ */ cudaStat = cudaMemcpy2DAsync ( gpu_p->h_Lx[iHostBuff] + L_ENTRY*(nscol2+gpu_row_start), nsrow * L_ENTRY * sizeof (Lx [0]), devPtrB + L_ENTRY*gpu_row_start, gpu_ldb * L_ENTRY * sizeof (devPtrB [0]), gpu_row_chunk * L_ENTRY * sizeof (devPtrB [0]), nscol2, cudaMemcpyDeviceToHost, Common->gpuStream[ibuf]); if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy from device") ; } cudaEventRecord ( Common->updateCBuffersFree[ibuf], Common->gpuStream[ibuf] ); gpu_row_start += gpu_row_chunk; ibuf++; ibuf = ibuf % CHOLMOD_HOST_SUPERNODE_BUFFERS; iblock ++; if ( iblock >= CHOLMOD_HOST_SUPERNODE_BUFFERS ) { Int gpu_row_start2 ; Int gpu_row_end ; /* then CHOLMOD_HOST_SUPERNODE_BUFFERS worth of work has been * scheduled, so check for completed events and copy result into * Lx before continuing. 
*/ cudaEventSynchronize ( Common->updateCBuffersFree [iblock%CHOLMOD_HOST_SUPERNODE_BUFFERS] ); /* copy into Lx */ gpu_row_start2 = nscol2 + (iblock-CHOLMOD_HOST_SUPERNODE_BUFFERS) *gpu_row_max_chunk; gpu_row_end = gpu_row_start2+gpu_row_max_chunk; if ( gpu_row_end > nsrow ) gpu_row_end = nsrow; #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \ private(iidx) if ( nscol2 > 32 ) for ( j=0; j<nscol2; j++ ) { for ( i=gpu_row_start2*L_ENTRY; i<gpu_row_end*L_ENTRY; i++ ) { iidx = j*nsrow*L_ENTRY+i; Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx]; } } } } /* Convenient to copy the L1 block here */ #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \ private ( iidx ) if ( nscol2 > 32 ) for ( j=0; j<nscol2; j++ ) { for ( i=j*L_ENTRY; i<nscol2*L_ENTRY; i++ ) { iidx = j*nsrow*L_ENTRY + i; Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx]; } } /* now account for the last HSTREAMS buffers */ for ( iwrap=0; iwrap<CHOLMOD_HOST_SUPERNODE_BUFFERS; iwrap++ ) { int i, j; Int gpu_row_start2 = nscol2 + (iblock-CHOLMOD_HOST_SUPERNODE_BUFFERS) *gpu_row_max_chunk; if (iblock-CHOLMOD_HOST_SUPERNODE_BUFFERS >= 0 && gpu_row_start2 < nsrow ) { Int iidx; Int gpu_row_end = gpu_row_start2+gpu_row_max_chunk; if ( gpu_row_end > nsrow ) gpu_row_end = nsrow; cudaEventSynchronize ( Common->updateCBuffersFree [iblock%CHOLMOD_HOST_SUPERNODE_BUFFERS] ); /* copy into Lx */ #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \ private(iidx) if ( nscol2 > 32 ) for ( j=0; j<nscol2; j++ ) { for ( i=gpu_row_start2*L_ENTRY; i<gpu_row_end*L_ENTRY; i++ ) { iidx = j*nsrow*L_ENTRY+i; Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx]; } } } iblock++; } /* ---------------------------------------------------------------------- */ /* return */ /* ---------------------------------------------------------------------- */ #ifndef NTIMER Common->CHOLMOD_GPU_TRSM_TIME += SuiteSparse_time ( ) - tstart ; #endif return (1) ; } /* ========================================================================== */ /* === gpu_copy_supernode =================================================== */ /* ========================================================================== */ /* * In the event gpu_triangular_solve is not needed / called, this routine * copies the factored diagonal block from the GPU to the CPU. */ void TEMPLATE2 (CHOLMOD (gpu_copy_supernode)) ( cholmod_common *Common, float *Lx, Int psx, Int nscol, Int nscol2, Int nsrow, int supernodeUsedGPU, int iHostBuff, cholmod_gpu_pointers *gpu_p ) { Int iidx, i, j; if ( supernodeUsedGPU && nscol2 * L_ENTRY >= CHOLMOD_POTRF_LIMIT ) { cudaDeviceSynchronize(); #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \ private(iidx,i,j) if (nscol>32) for ( j=0; j<nscol; j++ ) { for ( i=j*L_ENTRY; i<nscol*L_ENTRY; i++ ) { iidx = j*nsrow*L_ENTRY+i; Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx]; } } } return; } #endif #undef REAL #undef COMPLEX #undef ZOMPLEX
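/* Illustrative aside, not part of the CHOLMOD template above: the comment on
 * gpu_triangular_solve describes the system L2*L1' = S2, solved as
 * L2 = S2 / L1' in MATLAB notation, which the chunked cublasDtrsm/cublasZtrsm
 * calls (CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_T/OP_C) carry
 * out on the GPU.  The sketch below is a plain-C, double-precision reference
 * for the real (non-complex) case only; the function name and its arguments
 * are hypothetical and do not appear in CHOLMOD.  L1 is k-by-k lower
 * triangular, S2 is m-by-k, both column-major with leading dimensions ldl and
 * lds, and S2 is overwritten with the solution L2. */
void reference_trsm_right_lower_trans
(
    int m,              /* number of rows of S2 (rows k2:n-1 of L)   */
    int k,              /* number of columns of S2, order of L1      */
    const double *L1,   /* k-by-k lower triangular, column-major     */
    int ldl,            /* leading dimension of L1                   */
    double *S2,         /* m-by-k, overwritten with L2 on output     */
    int lds             /* leading dimension of S2                   */
)
{
    int i, j, p ;
    double ljp ;
    for (j = 0 ; j < k ; j++)
    {
        /* S2(:,j) -= S2(:,p) * L1(j,p) for the already-solved columns p < j,
         * since (L1')(p,j) = L1(j,p) */
        for (p = 0 ; p < j ; p++)
        {
            ljp = L1 [j + p*ldl] ;
            for (i = 0 ; i < m ; i++)
            {
                S2 [i + j*lds] -= S2 [i + p*lds] * ljp ;
            }
        }
        /* divide by the diagonal entry L1(j,j) */
        for (i = 0 ; i < m ; i++)
        {
            S2 [i + j*lds] /= L1 [j + j*ldl] ;
        }
    }
}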
omp_bugparfoc.c
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int main (int argc, char *argv[])
{
    const size_t N = 100;
    const size_t chunk = 3;
    int i, tid;
    float a[N], b[N], c[N];

    for (i = 0; i < N; ++i)
    {
        a[i] = b[i] = (float)i;
    }

    #pragma omp parallel \
        shared(a,b,c,chunk) \
        private(i,tid) \
        schedule(static,chunk)
    {
        tid = omp_get_thread_num();
        for (i = 0; i < N; ++i)
        {
            c[i] = a[i] + b[i];
            printf("tid = %d, c[%d] = %f\n", tid, i, c[i]);
        }
    }

    return 0;
}
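/* Illustrative aside, not part of omp_bugparfoc.c above: in that program the
 * schedule(static,chunk) clause is attached to "#pragma omp parallel", but a
 * schedule clause is only valid on a worksharing loop directive, and as
 * written every thread also executes the full loop redundantly.  A minimal
 * corrected sketch (same data and chunk size; nothing below is part of the
 * original file) moves the schedule clause onto an "omp for" directive so the
 * iterations are actually divided among the threads. */
#include <omp.h>
#include <stdio.h>

int main (void)
{
    const size_t N = 100;
    const size_t chunk = 3;
    int i, tid;
    float a[N], b[N], c[N];

    for (i = 0; i < N; ++i)
    {
        a[i] = b[i] = (float)i;
    }

    #pragma omp parallel shared(a,b,c) private(i,tid)
    {
        tid = omp_get_thread_num();

        /* the schedule clause belongs on the worksharing loop */
        #pragma omp for schedule(static,chunk)
        for (i = 0; i < N; ++i)
        {
            c[i] = a[i] + b[i];
            printf("tid = %d, c[%d] = %f\n", tid, i, c[i]);
        }
    }

    return 0;
}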
thermodynamics.c
/* Generated by Cython 0.29.12 */ /* BEGIN: Cython Metadata { "distutils": { "extra_compile_args": [ "/openmp" ], "name": "quantas.utils.physics.thermodynamics", "sources": [ "quantas/utils/physics/thermodynamics.pyx" ] }, "module_name": "quantas.utils.physics.thermodynamics" } END: Cython Metadata */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. #else #define CYTHON_ABI "0_29_12" #define CYTHON_HEX_VERSION 0x001D0CF0 #define CYTHON_FUTURE_DIVISION 0 #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef 
CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #ifndef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) #endif #ifndef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #ifdef SIZEOF_VOID_P enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; #endif #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # 
define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include <stdint.h> #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef METH_STACKLESS #define METH_STACKLESS 0 #endif #if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | 
METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 #define PyMem_RawMalloc(n) PyMem_Malloc(n) #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) #define PyMem_RawFree(p) PyMem_Free(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) #include "pythread.h" #define Py_tss_NEEDS_INIT 0 typedef int Py_tss_t; static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { *key = PyThread_create_key(); return 0; } static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); *key = Py_tss_NEEDS_INIT; return key; } static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { PyObject_Free(key); } static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { return *key != Py_tss_NEEDS_INIT; } static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { PyThread_delete_key(*key); *key = Py_tss_NEEDS_INIT; } static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { return PyThread_set_key_value(*key, value); } static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { return PyThread_get_key_value(*key); } #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS #define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) #else #define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #define PyObject_Unicode PyObject_Str #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #if CYTHON_ASSUME_SAFE_MACROS #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) #else #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) #endif #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : (Py_INCREF(func), func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_ERR(f_index, lineno, Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__quantas__utils__physics__thermodynamics #define __PYX_HAVE_API__quantas__utils__physics__thermodynamics /* Early includes */ #include "pythread.h" #include <string.h> #include <stdlib.h> #include <stdio.h> #include "pystate.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { return (size_t) i < (size_t) limit; } #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime = NULL; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= 
__FILE__; static const char *__pyx_filename; static const char *__pyx_f[] = { "quantas\\utils\\physics\\thermodynamics.pyx", "stringsource", }; /* MemviewSliceStruct.proto */ struct __pyx_memoryview_obj; typedef struct { struct __pyx_memoryview_obj *memview; char *data; Py_ssize_t shape[8]; Py_ssize_t strides[8]; Py_ssize_t suboffsets[8]; } __Pyx_memviewslice; #define __Pyx_MemoryView_Len(m) (m.shape[0]) /* Atomics.proto */ #include <pythread.h> #ifndef CYTHON_ATOMICS #define CYTHON_ATOMICS 1 #endif #define __pyx_atomic_int_type int #if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\ (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\ !defined(__i386__) #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1) #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1) #ifdef __PYX_DEBUG_ATOMICS #warning "Using GNU atomics" #endif #elif CYTHON_ATOMICS && defined(_MSC_VER) && 0 #include <Windows.h> #undef __pyx_atomic_int_type #define __pyx_atomic_int_type LONG #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #pragma message ("Using MSVC atomics") #endif #elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #warning "Using Intel atomics" #endif #else #undef CYTHON_ATOMICS #define CYTHON_ATOMICS 0 #ifdef __PYX_DEBUG_ATOMICS #warning "Not using atomics" #endif #endif typedef volatile __pyx_atomic_int_type __pyx_atomic_int; #if CYTHON_ATOMICS #define __pyx_add_acquisition_count(memview)\ __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #else #define __pyx_add_acquisition_count(memview)\ __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #endif /* NoFastGil.proto */ #define __Pyx_PyGILState_Ensure PyGILState_Ensure #define __Pyx_PyGILState_Release PyGILState_Release #define __Pyx_FastGIL_Remember() #define __Pyx_FastGIL_Forget() #define __Pyx_FastGilFuncInit() /* ForceInitThreads.proto */ #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif /* BufferFormatStructs.proto */ #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /*--- Type declarations ---*/ struct __pyx_array_obj; struct __pyx_MemviewEnum_obj; struct __pyx_memoryview_obj; struct __pyx_memoryviewslice_obj; /* 
"View.MemoryView":105 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_array_obj { PyObject_HEAD struct __pyx_vtabstruct_array *__pyx_vtab; char *data; Py_ssize_t len; char *format; int ndim; Py_ssize_t *_shape; Py_ssize_t *_strides; Py_ssize_t itemsize; PyObject *mode; PyObject *_format; void (*callback_free_data)(void *); int free_data; int dtype_is_object; }; /* "View.MemoryView":279 * * @cname('__pyx_MemviewEnum') * cdef class Enum(object): # <<<<<<<<<<<<<< * cdef object name * def __init__(self, name): */ struct __pyx_MemviewEnum_obj { PyObject_HEAD PyObject *name; }; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_memoryview_obj { PyObject_HEAD struct __pyx_vtabstruct_memoryview *__pyx_vtab; PyObject *obj; PyObject *_size; PyObject *_array_interface; PyThread_type_lock lock; __pyx_atomic_int acquisition_count[2]; __pyx_atomic_int *acquisition_count_aligned_p; Py_buffer view; int flags; int dtype_is_object; __Pyx_TypeInfo *typeinfo; }; /* "View.MemoryView":961 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_memoryviewslice_obj { struct __pyx_memoryview_obj __pyx_base; __Pyx_memviewslice from_slice; PyObject *from_object; PyObject *(*to_object_func)(char *); int (*to_dtype_func)(char *, PyObject *); }; /* "View.MemoryView":105 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_vtabstruct_array { PyObject *(*get_memview)(struct __pyx_array_obj *); }; static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_vtabstruct_memoryview { char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); }; static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; /* "View.MemoryView":961 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_vtabstruct__memoryviewslice { struct __pyx_vtabstruct_memoryview __pyx_base; }; static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef 
WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* MemviewSliceInit.proto */ #define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d #define __Pyx_MEMVIEW_DIRECT 1 #define __Pyx_MEMVIEW_PTR 2 #define __Pyx_MEMVIEW_FULL 4 #define __Pyx_MEMVIEW_CONTIG 8 #define __Pyx_MEMVIEW_STRIDED 16 #define __Pyx_MEMVIEW_FOLLOW 32 #define __Pyx_IS_C_CONTIG 1 #define __Pyx_IS_F_CONTIG 2 static int __Pyx_init_memviewslice( struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference); static CYTHON_INLINE int __pyx_add_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); #define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) #define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) #define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) #define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); static CYTHON_INLINE void 
__Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); /* PyDictVersioning.proto */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS #define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) #define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ (version_var) = __PYX_GET_DICT_VERSION(dict);\ (cache_var) = (value); #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ (VAR) = __pyx_dict_cached_value;\ } else {\ (VAR) = __pyx_dict_cached_value = (LOOKUP);\ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ }\ } static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); #else #define __PYX_GET_DICT_VERSION(dict) (0) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); #endif /* GetModuleGlobalName.proto */ #if CYTHON_USE_DICT_VERSIONS #define __Pyx_GetModuleGlobalName(var, name) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } #define __Pyx_GetModuleGlobalNameUncached(var, name) {\ PY_UINT64_T __pyx_dict_version;\ PyObject *__pyx_dict_cached_value;\ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); #else #define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) #define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); #endif /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* None.proto */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); /* ArgTypeTest.proto */ #define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 
1 :\ __Pyx__ArgTypeTest(obj, type, name, exact)) static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #define __Pyx_BUILD_ASSERT_EXPR(cond)\ (sizeof(char [1 - 2*!(cond)]) - 1) #ifndef Py_MEMBER_SIZE #define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #endif static size_t __pyx_pyframe_localsplus_offset = 0; #include "frameobject.h" #define __Pxy_PyFrame_Initialize_Offsets()\ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) #define __Pyx_PyFrame_GetLocalsplus(frame)\ (assert(__pyx_pyframe_localsplus_offset), (PyObject 
**)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) #endif /* PyObjectCall2Args.proto */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* IncludeStringH.proto */ #include <string.h> /* BytesEquals.proto */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /* UnicodeEquals.proto */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /* StrEquals.proto */ #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif /* None.proto */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); /* UnaryNegOverflows.proto */ #define UNARY_NEG_WOULD_OVERFLOW(x)\ (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ /* GetAttr.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* ObjectGetItem.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); #else #define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) #endif /* decode_c_string_utf16.proto */ static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 0; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = -1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject 
*__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } /* decode_c_string.proto */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetAttr3.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* GetTopmostException.proto */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); #endif /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* SwapException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* FastTypeChecks.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) 
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ /* ListCompAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); #else #define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif /* ListExtend.proto */ static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { #if CYTHON_COMPILING_IN_CPYTHON PyObject* none = _PyList_Extend((PyListObject*)L, v); if (unlikely(!none)) return -1; Py_DECREF(none); return 0; #else return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); #endif } /* ListAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif /* None.proto */ static CYTHON_INLINE long __Pyx_div_long(long, long); /* WriteUnraisableException.proto */ static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename, int full_traceback, int nogil); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* HasAttr.proto */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); /* PyObject_GenericGetAttrNoDict.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr #endif /* PyObject_GenericGetAttr.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr #endif /* SetVTable.proto */ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /* SetupReduce.proto */ static int __Pyx_setup_reduce(PyObject* type_obj); /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; /* MemviewSliceIsContig.proto */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); /* OverlappingSlices.proto */ static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize); /* Capsule.proto */ static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); /* IsLittleEndian.proto */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); /* BufferFormatCheck.proto */ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); /* TypeInfoCompare.proto */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); /* MemviewSliceValidateAndInit.proto */ static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *, int writable_flag); /* MemviewSliceCopyTemplate.proto */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ static char *__pyx_memoryview_get_item_pointer(struct 
__pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ /* Module declarations from 'cython.view' */ /* Module declarations from 'cython' */ /* Module declarations from 'quantas.utils.physics.thermodynamics' */ static PyTypeObject *__pyx_array_type = 0; static PyTypeObject *__pyx_MemviewEnum_type = 0; static PyTypeObject *__pyx_memoryview_type = 0; static PyTypeObject *__pyx_memoryviewslice_type = 0; static double __pyx_v_7quantas_5utils_7physics_14thermodynamics_NA; static PyObject *generic = 0; static PyObject *strided = 0; static PyObject *indirect = 0; static PyObject *contiguous = 0; static PyObject *indirect_contiguous = 0; static int __pyx_memoryview_thread_locks_used; static PyThread_type_lock __pyx_memoryview_thread_locks[8]; static PyObject *__pyx_f_7quantas_5utils_7physics_14thermodynamics_enthalpy(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/ static PyObject *__pyx_f_7quantas_5utils_7physics_14thermodynamics_gibbs(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/ static PyObject *__pyx_f_7quantas_5utils_7physics_14thermodynamics_adiabatic_bulk_modulus(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, double, int __pyx_skip_dispatch); /*proto*/ static PyObject *__pyx_f_7quantas_5utils_7physics_14thermodynamics_gruneisen_parameter(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/ static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ static void *__pyx_align_pointer(void *, size_t); /*proto*/ static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ static PyObject *_unellipsify(PyObject *, int); /*proto*/ static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ static char 
*__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 }; #define __Pyx_MODULE_NAME "quantas.utils.physics.thermodynamics" extern int __pyx_module_is_main_quantas__utils__physics__thermodynamics; int __pyx_module_is_main_quantas__utils__physics__thermodynamics = 0; /* Implementation of 'quantas.utils.physics.thermodynamics' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_enumerate; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static const char __pyx_k_F[] = "F"; static const char __pyx_k_O[] = "O"; static const char __pyx_k_T[] = "T"; static const char __pyx_k_U[] = "U"; static const char __pyx_k_V[] = "V"; static const char __pyx_k_c[] = "c"; static const char __pyx_k_p[] = "p"; static const char __pyx_k_Cv[] = "Cv"; static const char __pyx_k_KT[] = "KT"; static const char __pyx_k_cs[] 
= "cs"; static const char __pyx_k_id[] = "id"; static const char __pyx_k_np[] = "np"; static const char __pyx_k_pf[] = "pf"; static const char __pyx_k__19[] = "*"; static const char __pyx_k_new[] = "__new__"; static const char __pyx_k_obj[] = "obj"; static const char __pyx_k_base[] = "base"; static const char __pyx_k_dict[] = "__dict__"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_mode[] = "mode"; static const char __pyx_k_name[] = "name"; static const char __pyx_k_ndim[] = "ndim"; static const char __pyx_k_pack[] = "pack"; static const char __pyx_k_size[] = "size"; static const char __pyx_k_step[] = "step"; static const char __pyx_k_stop[] = "stop"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_ASCII[] = "ASCII"; static const char __pyx_k_alpha[] = "alpha"; static const char __pyx_k_class[] = "__class__"; static const char __pyx_k_dtype[] = "dtype"; static const char __pyx_k_error[] = "error"; static const char __pyx_k_flags[] = "flags"; static const char __pyx_k_numpy[] = "numpy"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_shape[] = "shape"; static const char __pyx_k_start[] = "start"; static const char __pyx_k_zeros[] = "zeros"; static const char __pyx_k_encode[] = "encode"; static const char __pyx_k_format[] = "format"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_name_2[] = "__name__"; static const char __pyx_k_pickle[] = "pickle"; static const char __pyx_k_reduce[] = "__reduce__"; static const char __pyx_k_struct[] = "struct"; static const char __pyx_k_unpack[] = "unpack"; static const char __pyx_k_update[] = "update"; static const char __pyx_k_float64[] = "float64"; static const char __pyx_k_fortran[] = "fortran"; static const char __pyx_k_memview[] = "memview"; static const char __pyx_k_Avogadro[] = "Avogadro"; static const char __pyx_k_Ellipsis[] = "Ellipsis"; static const char __pyx_k_getstate[] = "__getstate__"; static const char __pyx_k_itemsize[] = "itemsize"; static const char __pyx_k_pyx_type[] = "__pyx_type"; static const char __pyx_k_setstate[] = "__setstate__"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_enumerate[] = "enumerate"; static const char __pyx_k_pyx_state[] = "__pyx_state"; static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; static const char __pyx_k_IndexError[] = "IndexError"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_pyx_result[] = "__pyx_result"; static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static const char __pyx_k_MemoryError[] = "MemoryError"; static const char __pyx_k_PickleError[] = "PickleError"; static const char __pyx_k_pyx_checksum[] = "__pyx_checksum"; static const char __pyx_k_stringsource[] = "stringsource"; static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; static const char __pyx_k_View_MemoryView[] = "View.MemoryView"; static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; static const char __pyx_k_scipy_constants[] = "scipy.constants"; static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; static 
const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview"; static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview"; static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; static PyObject *__pyx_n_s_ASCII; static PyObject *__pyx_n_s_Avogadro; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor; static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject *__pyx_n_s_Cv; static PyObject *__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject *__pyx_n_s_F; static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; static PyObject *__pyx_n_s_IndexError; static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject *__pyx_n_s_KT; static PyObject *__pyx_n_s_MemoryError; static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_n_b_O; static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static PyObject *__pyx_n_s_PickleError; static PyObject *__pyx_n_s_T; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_n_s_U; static PyObject 
*__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_n_s_V; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_View_MemoryView; static PyObject *__pyx_n_s__19; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_alpha; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_class; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_cs; static PyObject *__pyx_n_s_dict; static PyObject *__pyx_n_s_dtype; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_float64; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_n_s_getstate; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_itemsize; static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_n_s_ndim; static PyObject *__pyx_n_s_new; static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; static PyObject *__pyx_n_s_np; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_n_s_obj; static PyObject *__pyx_n_s_p; static PyObject *__pyx_n_s_pack; static PyObject *__pyx_n_s_pf; static PyObject *__pyx_n_s_pickle; static PyObject *__pyx_n_s_pyx_PickleError; static PyObject *__pyx_n_s_pyx_checksum; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_result; static PyObject *__pyx_n_s_pyx_state; static PyObject *__pyx_n_s_pyx_type; static PyObject *__pyx_n_s_pyx_unpickle_Enum; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_reduce; static PyObject *__pyx_n_s_reduce_cython; static PyObject *__pyx_n_s_reduce_ex; static PyObject *__pyx_n_s_scipy_constants; static PyObject *__pyx_n_s_setstate; static PyObject *__pyx_n_s_setstate_cython; static PyObject *__pyx_n_s_shape; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_kp_s_stringsource; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_test; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_n_s_unpack; static PyObject *__pyx_n_s_update; static PyObject *__pyx_n_s_zeros; static PyObject *__pyx_pf_7quantas_5utils_7physics_14thermodynamics_enthalpy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_V, __Pyx_memviewslice __pyx_v_p, __Pyx_memviewslice __pyx_v_U); /* proto */ static PyObject *__pyx_pf_7quantas_5utils_7physics_14thermodynamics_2gibbs(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_V, __Pyx_memviewslice __pyx_v_p, __Pyx_memviewslice __pyx_v_F); /* proto */ static PyObject *__pyx_pf_7quantas_5utils_7physics_14thermodynamics_4adiabatic_bulk_modulus(CYTHON_UNUSED 
PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_T, __Pyx_memviewslice __pyx_v_V, __Pyx_memviewslice __pyx_v_KT, __Pyx_memviewslice __pyx_v_alpha, __Pyx_memviewslice __pyx_v_Cv, double __pyx_v_pf); /* proto */ static PyObject *__pyx_pf_7quantas_5utils_7physics_14thermodynamics_6gruneisen_parameter(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_V, __Pyx_memviewslice __pyx_v_KT, __Pyx_memviewslice __pyx_v_alpha, __Pyx_memviewslice __pyx_v_Cv); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject 
*__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject 
*__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_184977713; static PyObject *__pyx_int_neg_1; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_slice__15; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__14; static PyObject *__pyx_tuple__16; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__18; static PyObject *__pyx_tuple__20; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__22; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__24; static PyObject *__pyx_tuple__25; static PyObject *__pyx_codeobj__26; /* Late includes */ /* "quantas/utils/physics/thermodynamics.pyx":21 * @cython.boundscheck(False) * @cython.wraparound(False) * cpdef enthalpy(double[:,::1] V, double[::1] p, double[:,::1] U): # <<<<<<<<<<<<<< * """ * This functions calculates the enthalpy (H) of the system */ static PyObject *__pyx_pw_7quantas_5utils_7physics_14thermodynamics_1enthalpy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_f_7quantas_5utils_7physics_14thermodynamics_enthalpy(__Pyx_memviewslice __pyx_v_V, __Pyx_memviewslice __pyx_v_p, __Pyx_memviewslice __pyx_v_U, CYTHON_UNUSED int __pyx_skip_dispatch) { Py_ssize_t __pyx_v_n; Py_ssize_t __pyx_v_m; int __pyx_v_i; int __pyx_v_j; CYTHON_UNUSED __Pyx_memviewslice __pyx_v_p_v = { 0, 0, { 0 }, { 0 }, { 0 } }; CYTHON_UNUSED __Pyx_memviewslice __pyx_v_V_v = { 0, 0, { 0 }, { 0 }, { 0 } }; CYTHON_UNUSED __Pyx_memviewslice __pyx_v_U_v = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_v_result = NULL; __Pyx_memviewslice __pyx_v_result_v = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } }; Py_ssize_t __pyx_t_7; Py_ssize_t __pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; Py_ssize_t __pyx_t_11; int __pyx_t_12; Py_ssize_t __pyx_t_13; Py_ssize_t __pyx_t_14; Py_ssize_t __pyx_t_15; Py_ssize_t __pyx_t_16; Py_ssize_t __pyx_t_17; Py_ssize_t __pyx_t_18; Py_ssize_t __pyx_t_19; __Pyx_RefNannySetupContext("enthalpy", 0); /* "quantas/utils/physics/thermodynamics.pyx":43 * * """ * cdef Py_ssize_t n = V.shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t m = V.shape[1] * cdef int i, j */ __pyx_v_n = (__pyx_v_V.shape[0]); /* "quantas/utils/physics/thermodynamics.pyx":44 * """ * cdef Py_ssize_t n = V.shape[0] * cdef Py_ssize_t m = V.shape[1] # <<<<<<<<<<<<<< * cdef int i, j * cdef double[::1] p_v = p */ __pyx_v_m = (__pyx_v_V.shape[1]); /* "quantas/utils/physics/thermodynamics.pyx":46 * cdef Py_ssize_t m = V.shape[1] * cdef int i, j * cdef double[::1] p_v = p # <<<<<<<<<<<<<< * cdef double[:,::1] V_v = V * cdef double[:,::1] U_v = U */ __PYX_INC_MEMVIEW(&__pyx_v_p, 0); __pyx_v_p_v = __pyx_v_p; /* "quantas/utils/physics/thermodynamics.pyx":47 * cdef int i, j * cdef double[::1] p_v = p 
* cdef double[:,::1] V_v = V # <<<<<<<<<<<<<< * cdef double[:,::1] U_v = U * */ __PYX_INC_MEMVIEW(&__pyx_v_V, 0); __pyx_v_V_v = __pyx_v_V; /* "quantas/utils/physics/thermodynamics.pyx":48 * cdef double[::1] p_v = p * cdef double[:,::1] V_v = V * cdef double[:,::1] U_v = U # <<<<<<<<<<<<<< * * result = np.zeros( (n,m), dtype=np.float64 ) */ __PYX_INC_MEMVIEW(&__pyx_v_U, 0); __pyx_v_U_v = __pyx_v_U; /* "quantas/utils/physics/thermodynamics.pyx":50 * cdef double[:,::1] U_v = U * * result = np.zeros( (n,m), dtype=np.float64 ) # <<<<<<<<<<<<<< * cdef double[:,::1] result_v = result * */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 50, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 50, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_n); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 50, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_m); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 50, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 50, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 50, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 50, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 50, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float64); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 50, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 50, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 50, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_result = __pyx_t_5; __pyx_t_5 = 0; /* "quantas/utils/physics/thermodynamics.pyx":51 * * result = np.zeros( (n,m), dtype=np.float64 ) * cdef double[:,::1] result_v = result # <<<<<<<<<<<<<< * * for i in prange(n, nogil=True): */ __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_v_result, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 51, __pyx_L1_error) __pyx_v_result_v = __pyx_t_6; __pyx_t_6.memview = NULL; __pyx_t_6.data = NULL; /* "quantas/utils/physics/thermodynamics.pyx":53 * cdef double[:,::1] result_v = result * * for i in prange(n, nogil=True): # <<<<<<<<<<<<<< * for j in range(m): * result_v[i,j] = U[i,j] + p[j]*V[i,j]*NA/1000 */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { __pyx_t_7 = __pyx_v_n; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) 
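/* Within the OpenMP parallel region, likely()/unlikely() are temporarily
   reduced to plain expressions on Apple builds with GCC newer than 2.95;
   the __builtin_expect definitions are restored right after the loop. */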
#endif __pyx_t_9 = (__pyx_t_7 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_9 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) #endif /* _OPENMP */ for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_9; __pyx_t_8++){ { __pyx_v_i = (int)(0 + 1 * __pyx_t_8); /* Initialize private variables to invalid values */ __pyx_v_j = ((int)0xbad0bad0); /* "quantas/utils/physics/thermodynamics.pyx":54 * * for i in prange(n, nogil=True): * for j in range(m): # <<<<<<<<<<<<<< * result_v[i,j] = U[i,j] + p[j]*V[i,j]*NA/1000 * */ __pyx_t_10 = __pyx_v_m; __pyx_t_11 = __pyx_t_10; for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) { __pyx_v_j = __pyx_t_12; /* "quantas/utils/physics/thermodynamics.pyx":55 * for i in prange(n, nogil=True): * for j in range(m): * result_v[i,j] = U[i,j] + p[j]*V[i,j]*NA/1000 # <<<<<<<<<<<<<< * * return result */ __pyx_t_13 = __pyx_v_i; __pyx_t_14 = __pyx_v_j; __pyx_t_15 = __pyx_v_j; __pyx_t_16 = __pyx_v_i; __pyx_t_17 = __pyx_v_j; __pyx_t_18 = __pyx_v_i; __pyx_t_19 = __pyx_v_j; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_result_v.data + __pyx_t_18 * __pyx_v_result_v.strides[0]) )) + __pyx_t_19)) )) = ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_U.data + __pyx_t_13 * __pyx_v_U.strides[0]) )) + __pyx_t_14)) ))) + ((((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_p.data) + __pyx_t_15)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_V.data + __pyx_t_16 * __pyx_v_V.strides[0]) )) + __pyx_t_17)) )))) * __pyx_v_7quantas_5utils_7physics_14thermodynamics_NA) / 1000.0)); } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "quantas/utils/physics/thermodynamics.pyx":53 * cdef double[:,::1] result_v = result * * for i in prange(n, nogil=True): # <<<<<<<<<<<<<< * for j in range(m): * result_v[i,j] = U[i,j] + p[j]*V[i,j]*NA/1000 */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "quantas/utils/physics/thermodynamics.pyx":57 * result_v[i,j] = U[i,j] + p[j]*V[i,j]*NA/1000 * * return result # <<<<<<<<<<<<<< * * @cython.boundscheck(False) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "quantas/utils/physics/thermodynamics.pyx":21 * @cython.boundscheck(False) * @cython.wraparound(False) * cpdef enthalpy(double[:,::1] V, double[::1] p, double[:,::1] U): # <<<<<<<<<<<<<< * """ * This functions calculates the enthalpy (H) of the system */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __PYX_XDEC_MEMVIEW(&__pyx_t_6, 1); __Pyx_AddTraceback("quantas.utils.physics.thermodynamics.enthalpy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_p_v, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_V_v, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_U_v, 1); __Pyx_XDECREF(__pyx_v_result); __PYX_XDEC_MEMVIEW(&__pyx_v_result_v, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); 
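/* Common exit: on error __pyx_r is NULL and a traceback has been recorded;
   the borrowed memoryview slices (p_v, V_v, U_v, result_v) and the result
   reference were released above. */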
return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_7quantas_5utils_7physics_14thermodynamics_1enthalpy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_7quantas_5utils_7physics_14thermodynamics_enthalpy[] = "\n This functions calculates the enthalpy (H) of the system\n according to the formula:\n\n .. math:: H\\big(T,P \\big) = U\\big(T,P \\big) + pV\\big(T,P \\big)\n\n Attributes\n ----------\n V: ndarray\n 2D array of volume values (in m^3).\n p: ndarray\n 2D array of pressure values (in Pa).\n U: ndarray\n 2D array of internal energy (in kJ/mol).\n \n Returns\n -------\n H: ndarray\n 2D array of enthalpy values (in kJ/mol).\n\n "; static PyObject *__pyx_pw_7quantas_5utils_7physics_14thermodynamics_1enthalpy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_V = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_p = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_U = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("enthalpy (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_V,&__pyx_n_s_p,&__pyx_n_s_U,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_V)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_p)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("enthalpy", 1, 3, 3, 1); __PYX_ERR(0, 21, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_U)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("enthalpy", 1, 3, 3, 2); __PYX_ERR(0, 21, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "enthalpy") < 0)) __PYX_ERR(0, 21, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_V = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_V.memview)) __PYX_ERR(0, 21, __pyx_L3_error) __pyx_v_p = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_p.memview)) __PYX_ERR(0, 21, __pyx_L3_error) __pyx_v_U = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_U.memview)) __PYX_ERR(0, 21, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("enthalpy", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 21, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("quantas.utils.physics.thermodynamics.enthalpy", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = 
__pyx_pf_7quantas_5utils_7physics_14thermodynamics_enthalpy(__pyx_self, __pyx_v_V, __pyx_v_p, __pyx_v_U); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_7quantas_5utils_7physics_14thermodynamics_enthalpy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_V, __Pyx_memviewslice __pyx_v_p, __Pyx_memviewslice __pyx_v_U) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("enthalpy", 0); __Pyx_XDECREF(__pyx_r); if (unlikely(!__pyx_v_V.memview)) { __Pyx_RaiseUnboundLocalError("V"); __PYX_ERR(0, 21, __pyx_L1_error) } if (unlikely(!__pyx_v_p.memview)) { __Pyx_RaiseUnboundLocalError("p"); __PYX_ERR(0, 21, __pyx_L1_error) } if (unlikely(!__pyx_v_U.memview)) { __Pyx_RaiseUnboundLocalError("U"); __PYX_ERR(0, 21, __pyx_L1_error) } __pyx_t_1 = __pyx_f_7quantas_5utils_7physics_14thermodynamics_enthalpy(__pyx_v_V, __pyx_v_p, __pyx_v_U, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("quantas.utils.physics.thermodynamics.enthalpy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_V, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_p, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_U, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "quantas/utils/physics/thermodynamics.pyx":61 * @cython.boundscheck(False) * @cython.wraparound(False) * cpdef gibbs(double[:,::1] V, double[::1] p, double[:,::1] F): # <<<<<<<<<<<<<< * """ * This functions calculates the Gibbs free energy (G) of the system */ static PyObject *__pyx_pw_7quantas_5utils_7physics_14thermodynamics_3gibbs(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_f_7quantas_5utils_7physics_14thermodynamics_gibbs(__Pyx_memviewslice __pyx_v_V, __Pyx_memviewslice __pyx_v_p, __Pyx_memviewslice __pyx_v_F, CYTHON_UNUSED int __pyx_skip_dispatch) { Py_ssize_t __pyx_v_n; Py_ssize_t __pyx_v_m; int __pyx_v_i; int __pyx_v_j; CYTHON_UNUSED __Pyx_memviewslice __pyx_v_p_v = { 0, 0, { 0 }, { 0 }, { 0 } }; CYTHON_UNUSED __Pyx_memviewslice __pyx_v_V_v = { 0, 0, { 0 }, { 0 }, { 0 } }; CYTHON_UNUSED __Pyx_memviewslice __pyx_v_F_v = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_v_result = NULL; __Pyx_memviewslice __pyx_v_result_v = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } }; Py_ssize_t __pyx_t_7; Py_ssize_t __pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; Py_ssize_t __pyx_t_11; int __pyx_t_12; Py_ssize_t __pyx_t_13; Py_ssize_t __pyx_t_14; Py_ssize_t __pyx_t_15; Py_ssize_t __pyx_t_16; Py_ssize_t __pyx_t_17; Py_ssize_t __pyx_t_18; Py_ssize_t __pyx_t_19; __Pyx_RefNannySetupContext("gibbs", 0); /* "quantas/utils/physics/thermodynamics.pyx":83 * * """ * cdef Py_ssize_t n = V.shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t m = V.shape[1] * cdef int i, j */ __pyx_v_n = (__pyx_v_V.shape[0]); /* "quantas/utils/physics/thermodynamics.pyx":84 * """ * cdef Py_ssize_t n = V.shape[0] * cdef Py_ssize_t m = V.shape[1] # <<<<<<<<<<<<<< * cdef int i, j * cdef double[::1] p_v = p */ __pyx_v_m = (__pyx_v_V.shape[1]); /* 
"quantas/utils/physics/thermodynamics.pyx":86 * cdef Py_ssize_t m = V.shape[1] * cdef int i, j * cdef double[::1] p_v = p # <<<<<<<<<<<<<< * cdef double[:,::1] V_v = V * cdef double[:,::1] F_v = F */ __PYX_INC_MEMVIEW(&__pyx_v_p, 0); __pyx_v_p_v = __pyx_v_p; /* "quantas/utils/physics/thermodynamics.pyx":87 * cdef int i, j * cdef double[::1] p_v = p * cdef double[:,::1] V_v = V # <<<<<<<<<<<<<< * cdef double[:,::1] F_v = F * */ __PYX_INC_MEMVIEW(&__pyx_v_V, 0); __pyx_v_V_v = __pyx_v_V; /* "quantas/utils/physics/thermodynamics.pyx":88 * cdef double[::1] p_v = p * cdef double[:,::1] V_v = V * cdef double[:,::1] F_v = F # <<<<<<<<<<<<<< * * result = np.zeros( (n,m), dtype=np.float64 ) */ __PYX_INC_MEMVIEW(&__pyx_v_F, 0); __pyx_v_F_v = __pyx_v_F; /* "quantas/utils/physics/thermodynamics.pyx":90 * cdef double[:,::1] F_v = F * * result = np.zeros( (n,m), dtype=np.float64 ) # <<<<<<<<<<<<<< * cdef double[:,::1] result_v = result * */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_n); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_m); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float64); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_result = __pyx_t_5; __pyx_t_5 = 0; /* "quantas/utils/physics/thermodynamics.pyx":91 * * result = np.zeros( (n,m), dtype=np.float64 ) * cdef double[:,::1] result_v = result # <<<<<<<<<<<<<< * * for i in prange(n, nogil=True): */ __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_v_result, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 91, __pyx_L1_error) __pyx_v_result_v = __pyx_t_6; __pyx_t_6.memview = NULL; __pyx_t_6.data = NULL; /* "quantas/utils/physics/thermodynamics.pyx":93 * cdef double[:,::1] result_v = result * * for i in prange(n, nogil=True): # <<<<<<<<<<<<<< * for j in range(m): * result_v[i,j] = F[i,j] + p[j]*V[i,j]*NA/1000. 
*/ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { __pyx_t_7 = __pyx_v_n; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_9 = (__pyx_t_7 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_9 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) #endif /* _OPENMP */ for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_9; __pyx_t_8++){ { __pyx_v_i = (int)(0 + 1 * __pyx_t_8); /* Initialize private variables to invalid values */ __pyx_v_j = ((int)0xbad0bad0); /* "quantas/utils/physics/thermodynamics.pyx":94 * * for i in prange(n, nogil=True): * for j in range(m): # <<<<<<<<<<<<<< * result_v[i,j] = F[i,j] + p[j]*V[i,j]*NA/1000. * */ __pyx_t_10 = __pyx_v_m; __pyx_t_11 = __pyx_t_10; for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) { __pyx_v_j = __pyx_t_12; /* "quantas/utils/physics/thermodynamics.pyx":95 * for i in prange(n, nogil=True): * for j in range(m): * result_v[i,j] = F[i,j] + p[j]*V[i,j]*NA/1000. # <<<<<<<<<<<<<< * * return result */ __pyx_t_13 = __pyx_v_i; __pyx_t_14 = __pyx_v_j; __pyx_t_15 = __pyx_v_j; __pyx_t_16 = __pyx_v_i; __pyx_t_17 = __pyx_v_j; __pyx_t_18 = __pyx_v_i; __pyx_t_19 = __pyx_v_j; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_result_v.data + __pyx_t_18 * __pyx_v_result_v.strides[0]) )) + __pyx_t_19)) )) = ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_F.data + __pyx_t_13 * __pyx_v_F.strides[0]) )) + __pyx_t_14)) ))) + ((((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_p.data) + __pyx_t_15)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_V.data + __pyx_t_16 * __pyx_v_V.strides[0]) )) + __pyx_t_17)) )))) * __pyx_v_7quantas_5utils_7physics_14thermodynamics_NA) / 1000.)); } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "quantas/utils/physics/thermodynamics.pyx":93 * cdef double[:,::1] result_v = result * * for i in prange(n, nogil=True): # <<<<<<<<<<<<<< * for j in range(m): * result_v[i,j] = F[i,j] + p[j]*V[i,j]*NA/1000. */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "quantas/utils/physics/thermodynamics.pyx":97 * result_v[i,j] = F[i,j] + p[j]*V[i,j]*NA/1000. 
* * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "quantas/utils/physics/thermodynamics.pyx":61 * @cython.boundscheck(False) * @cython.wraparound(False) * cpdef gibbs(double[:,::1] V, double[::1] p, double[:,::1] F): # <<<<<<<<<<<<<< * """ * This functions calculates the Gibbs free energy (G) of the system */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __PYX_XDEC_MEMVIEW(&__pyx_t_6, 1); __Pyx_AddTraceback("quantas.utils.physics.thermodynamics.gibbs", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_p_v, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_V_v, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_F_v, 1); __Pyx_XDECREF(__pyx_v_result); __PYX_XDEC_MEMVIEW(&__pyx_v_result_v, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_7quantas_5utils_7physics_14thermodynamics_3gibbs(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_7quantas_5utils_7physics_14thermodynamics_2gibbs[] = "\n This functions calculates the Gibbs free energy (G) of the system\n according to the formula:\n\n .. math:: G \\big(T,P \\big) = F\\big(T,P \\big) + pV\\big(T,P \\big)\n\n Attributes\n ----------\n V: ndarray\n 2D array of volume values (in m^3).\n p: ndarray\n 2D array of pressure values (in Pa).\n F: ndarray\n 2D array of Helmholtz free energy (in kJ/mol).\n \n Returns\n -------\n H: ndarray\n 2D array of Gibbs free energy values (in kJ/mol).\n\n "; static PyObject *__pyx_pw_7quantas_5utils_7physics_14thermodynamics_3gibbs(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_V = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_p = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_F = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gibbs (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_V,&__pyx_n_s_p,&__pyx_n_s_F,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_V)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_p)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gibbs", 1, 3, 3, 1); __PYX_ERR(0, 61, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_F)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gibbs", 1, 3, 3, 2); __PYX_ERR(0, 61, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gibbs") < 0)) __PYX_ERR(0, 61, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = 
PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_V = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_V.memview)) __PYX_ERR(0, 61, __pyx_L3_error) __pyx_v_p = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_p.memview)) __PYX_ERR(0, 61, __pyx_L3_error) __pyx_v_F = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_F.memview)) __PYX_ERR(0, 61, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gibbs", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 61, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("quantas.utils.physics.thermodynamics.gibbs", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_7quantas_5utils_7physics_14thermodynamics_2gibbs(__pyx_self, __pyx_v_V, __pyx_v_p, __pyx_v_F); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_7quantas_5utils_7physics_14thermodynamics_2gibbs(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_V, __Pyx_memviewslice __pyx_v_p, __Pyx_memviewslice __pyx_v_F) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("gibbs", 0); __Pyx_XDECREF(__pyx_r); if (unlikely(!__pyx_v_V.memview)) { __Pyx_RaiseUnboundLocalError("V"); __PYX_ERR(0, 61, __pyx_L1_error) } if (unlikely(!__pyx_v_p.memview)) { __Pyx_RaiseUnboundLocalError("p"); __PYX_ERR(0, 61, __pyx_L1_error) } if (unlikely(!__pyx_v_F.memview)) { __Pyx_RaiseUnboundLocalError("F"); __PYX_ERR(0, 61, __pyx_L1_error) } __pyx_t_1 = __pyx_f_7quantas_5utils_7physics_14thermodynamics_gibbs(__pyx_v_V, __pyx_v_p, __pyx_v_F, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 61, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("quantas.utils.physics.thermodynamics.gibbs", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_V, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_p, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_F, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "quantas/utils/physics/thermodynamics.pyx":103 * @cython.wraparound(False) * @cython.cdivision(True) * cpdef adiabatic_bulk_modulus(double[::1] T, double[:,::1] V, double[:,::1] KT, # <<<<<<<<<<<<<< * double[:,::1] alpha, double[:,::1] Cv, double pf): * """ */ static PyObject *__pyx_pw_7quantas_5utils_7physics_14thermodynamics_5adiabatic_bulk_modulus(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_f_7quantas_5utils_7physics_14thermodynamics_adiabatic_bulk_modulus(__Pyx_memviewslice __pyx_v_T, __Pyx_memviewslice __pyx_v_V, __Pyx_memviewslice __pyx_v_KT, __Pyx_memviewslice __pyx_v_alpha, __Pyx_memviewslice __pyx_v_Cv, double __pyx_v_pf, CYTHON_UNUSED int __pyx_skip_dispatch) { Py_ssize_t __pyx_v_n; Py_ssize_t __pyx_v_m; int __pyx_v_i; int __pyx_v_j; __Pyx_memviewslice __pyx_v_T_v = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_V_v = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_KT_v = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_alpha_v = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_Cv_v = { 
0, 0, { 0 }, { 0 }, { 0 } }; double __pyx_v_factor; PyObject *__pyx_v_KS = NULL; __Pyx_memviewslice __pyx_v_KS_v = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } }; Py_ssize_t __pyx_t_7; Py_ssize_t __pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; Py_ssize_t __pyx_t_11; int __pyx_t_12; int __pyx_t_13; Py_ssize_t __pyx_t_14; int __pyx_t_15; Py_ssize_t __pyx_t_16; Py_ssize_t __pyx_t_17; Py_ssize_t __pyx_t_18; Py_ssize_t __pyx_t_19; Py_ssize_t __pyx_t_20; Py_ssize_t __pyx_t_21; Py_ssize_t __pyx_t_22; Py_ssize_t __pyx_t_23; Py_ssize_t __pyx_t_24; Py_ssize_t __pyx_t_25; Py_ssize_t __pyx_t_26; Py_ssize_t __pyx_t_27; Py_ssize_t __pyx_t_28; Py_ssize_t __pyx_t_29; Py_ssize_t __pyx_t_30; Py_ssize_t __pyx_t_31; Py_ssize_t __pyx_t_32; Py_ssize_t __pyx_t_33; Py_ssize_t __pyx_t_34; Py_ssize_t __pyx_t_35; Py_ssize_t __pyx_t_36; Py_ssize_t __pyx_t_37; Py_ssize_t __pyx_t_38; __Pyx_RefNannySetupContext("adiabatic_bulk_modulus", 0); /* "quantas/utils/physics/thermodynamics.pyx":133 * Adiabatic bulk modulus (KS). * """ * cdef Py_ssize_t n = V.shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t m = V.shape[1] * cdef int i, j */ __pyx_v_n = (__pyx_v_V.shape[0]); /* "quantas/utils/physics/thermodynamics.pyx":134 * """ * cdef Py_ssize_t n = V.shape[0] * cdef Py_ssize_t m = V.shape[1] # <<<<<<<<<<<<<< * cdef int i, j * cdef double[::1] T_v = T */ __pyx_v_m = (__pyx_v_V.shape[1]); /* "quantas/utils/physics/thermodynamics.pyx":136 * cdef Py_ssize_t m = V.shape[1] * cdef int i, j * cdef double[::1] T_v = T # <<<<<<<<<<<<<< * cdef double[:,::1] V_v = V * cdef double[:,::1] KT_v = KT */ __PYX_INC_MEMVIEW(&__pyx_v_T, 0); __pyx_v_T_v = __pyx_v_T; /* "quantas/utils/physics/thermodynamics.pyx":137 * cdef int i, j * cdef double[::1] T_v = T * cdef double[:,::1] V_v = V # <<<<<<<<<<<<<< * cdef double[:,::1] KT_v = KT * cdef double[:,::1] alpha_v = alpha */ __PYX_INC_MEMVIEW(&__pyx_v_V, 0); __pyx_v_V_v = __pyx_v_V; /* "quantas/utils/physics/thermodynamics.pyx":138 * cdef double[::1] T_v = T * cdef double[:,::1] V_v = V * cdef double[:,::1] KT_v = KT # <<<<<<<<<<<<<< * cdef double[:,::1] alpha_v = alpha * cdef double[:,::1] Cv_v = Cv */ __PYX_INC_MEMVIEW(&__pyx_v_KT, 0); __pyx_v_KT_v = __pyx_v_KT; /* "quantas/utils/physics/thermodynamics.pyx":139 * cdef double[:,::1] V_v = V * cdef double[:,::1] KT_v = KT * cdef double[:,::1] alpha_v = alpha # <<<<<<<<<<<<<< * cdef double[:,::1] Cv_v = Cv * cdef double factor = pf */ __PYX_INC_MEMVIEW(&__pyx_v_alpha, 0); __pyx_v_alpha_v = __pyx_v_alpha; /* "quantas/utils/physics/thermodynamics.pyx":140 * cdef double[:,::1] KT_v = KT * cdef double[:,::1] alpha_v = alpha * cdef double[:,::1] Cv_v = Cv # <<<<<<<<<<<<<< * cdef double factor = pf * */ __PYX_INC_MEMVIEW(&__pyx_v_Cv, 0); __pyx_v_Cv_v = __pyx_v_Cv; /* "quantas/utils/physics/thermodynamics.pyx":141 * cdef double[:,::1] alpha_v = alpha * cdef double[:,::1] Cv_v = Cv * cdef double factor = pf # <<<<<<<<<<<<<< * * KS = np.zeros( (n,m), dtype=np.float64 ) */ __pyx_v_factor = __pyx_v_pf; /* "quantas/utils/physics/thermodynamics.pyx":143 * cdef double factor = pf * * KS = np.zeros( (n,m), dtype=np.float64 ) # <<<<<<<<<<<<<< * cdef double[:,::1] KS_v = KS * */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 143, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = 
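/*
 * The generated code that follows corresponds to thermodynamics.pyx:143-154:
 * KS is allocated as an (n, m) float64 array and filled inside an OpenMP
 * prange loop with
 *
 *     KS[i,j] = KT[i,j] + alpha[i,j]^2 * (pf*KT[i,j])^2 * V[i,j] * T[i]
 *               / (Cv[i,j]/NA) / pf
 *
 * falling back to KS[i,j] = KT[i,j] wherever T[i] or Cv[i,j] is zero, so the
 * division never sees a zero denominator.  pf appears to be the factor that
 * converts KT to Pa for the correction term and converts the result back to
 * the input unit.  A rough, illustrative NumPy sketch under those assumptions:
 *
 *     corr = alpha**2 * (pf*KT)**2 * V * T[:, None] / (Cv/NA) / pf
 *     KS = np.where((T[:, None] == 0.0) | (Cv == 0.0), KT, KT + corr)
 */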
__Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 143, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_n); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 143, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_m); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 143, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 143, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 143, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 143, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 143, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float64); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 143, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 143, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 143, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_KS = __pyx_t_5; __pyx_t_5 = 0; /* "quantas/utils/physics/thermodynamics.pyx":144 * * KS = np.zeros( (n,m), dtype=np.float64 ) * cdef double[:,::1] KS_v = KS # <<<<<<<<<<<<<< * * for i in prange(n, nogil=True): */ __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_v_KS, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 144, __pyx_L1_error) __pyx_v_KS_v = __pyx_t_6; __pyx_t_6.memview = NULL; __pyx_t_6.data = NULL; /* "quantas/utils/physics/thermodynamics.pyx":146 * cdef double[:,::1] KS_v = KS * * for i in prange(n, nogil=True): # <<<<<<<<<<<<<< * for j in range(m): * if T_v[i] == 0. 
or Cv_v[i,j] == 0.: */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { __pyx_t_7 = __pyx_v_n; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_9 = (__pyx_t_7 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_9 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_24, __pyx_t_25, __pyx_t_26, __pyx_t_27, __pyx_t_28, __pyx_t_29, __pyx_t_30, __pyx_t_31, __pyx_t_32, __pyx_t_33, __pyx_t_34, __pyx_t_35, __pyx_t_36, __pyx_t_37, __pyx_t_38) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) #endif /* _OPENMP */ for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_9; __pyx_t_8++){ { __pyx_v_i = (int)(0 + 1 * __pyx_t_8); /* Initialize private variables to invalid values */ __pyx_v_j = ((int)0xbad0bad0); /* "quantas/utils/physics/thermodynamics.pyx":147 * * for i in prange(n, nogil=True): * for j in range(m): # <<<<<<<<<<<<<< * if T_v[i] == 0. or Cv_v[i,j] == 0.: * KS_v[i,j] = KT_v[i,j] */ __pyx_t_10 = __pyx_v_m; __pyx_t_11 = __pyx_t_10; for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) { __pyx_v_j = __pyx_t_12; /* "quantas/utils/physics/thermodynamics.pyx":148 * for i in prange(n, nogil=True): * for j in range(m): * if T_v[i] == 0. or Cv_v[i,j] == 0.: # <<<<<<<<<<<<<< * KS_v[i,j] = KT_v[i,j] * else: */ __pyx_t_14 = __pyx_v_i; __pyx_t_15 = (((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_T_v.data) + __pyx_t_14)) ))) == 0.) != 0); if (!__pyx_t_15) { } else { __pyx_t_13 = __pyx_t_15; goto __pyx_L13_bool_binop_done; } __pyx_t_16 = __pyx_v_i; __pyx_t_17 = __pyx_v_j; __pyx_t_15 = (((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_Cv_v.data + __pyx_t_16 * __pyx_v_Cv_v.strides[0]) )) + __pyx_t_17)) ))) == 0.) != 0); __pyx_t_13 = __pyx_t_15; __pyx_L13_bool_binop_done:; if (__pyx_t_13) { /* "quantas/utils/physics/thermodynamics.pyx":149 * for j in range(m): * if T_v[i] == 0. or Cv_v[i,j] == 0.: * KS_v[i,j] = KT_v[i,j] # <<<<<<<<<<<<<< * else: * KS_v[i,j] = KT_v[i,j] + ( alpha_v[i,j] * alpha_v[i,j] * \ */ __pyx_t_18 = __pyx_v_i; __pyx_t_19 = __pyx_v_j; __pyx_t_20 = __pyx_v_i; __pyx_t_21 = __pyx_v_j; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_KS_v.data + __pyx_t_20 * __pyx_v_KS_v.strides[0]) )) + __pyx_t_21)) )) = (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_KT_v.data + __pyx_t_18 * __pyx_v_KT_v.strides[0]) )) + __pyx_t_19)) ))); /* "quantas/utils/physics/thermodynamics.pyx":148 * for i in prange(n, nogil=True): * for j in range(m): * if T_v[i] == 0. 
or Cv_v[i,j] == 0.: # <<<<<<<<<<<<<< * KS_v[i,j] = KT_v[i,j] * else: */ goto __pyx_L12; } /* "quantas/utils/physics/thermodynamics.pyx":151 * KS_v[i,j] = KT_v[i,j] * else: * KS_v[i,j] = KT_v[i,j] + ( alpha_v[i,j] * alpha_v[i,j] * \ # <<<<<<<<<<<<<< * (factor * KT_v[i,j] * factor * KT_v[i,j]) * \ * V_v[i,j] * T_v[i] / (Cv_v[i,j]/NA) */ /*else*/ { __pyx_t_22 = __pyx_v_i; __pyx_t_23 = __pyx_v_j; __pyx_t_24 = __pyx_v_i; __pyx_t_25 = __pyx_v_j; __pyx_t_26 = __pyx_v_i; __pyx_t_27 = __pyx_v_j; /* "quantas/utils/physics/thermodynamics.pyx":152 * else: * KS_v[i,j] = KT_v[i,j] + ( alpha_v[i,j] * alpha_v[i,j] * \ * (factor * KT_v[i,j] * factor * KT_v[i,j]) * \ # <<<<<<<<<<<<<< * V_v[i,j] * T_v[i] / (Cv_v[i,j]/NA) * )/factor */ __pyx_t_28 = __pyx_v_i; __pyx_t_29 = __pyx_v_j; __pyx_t_30 = __pyx_v_i; __pyx_t_31 = __pyx_v_j; /* "quantas/utils/physics/thermodynamics.pyx":153 * KS_v[i,j] = KT_v[i,j] + ( alpha_v[i,j] * alpha_v[i,j] * \ * (factor * KT_v[i,j] * factor * KT_v[i,j]) * \ * V_v[i,j] * T_v[i] / (Cv_v[i,j]/NA) # <<<<<<<<<<<<<< * )/factor * return KS */ __pyx_t_32 = __pyx_v_i; __pyx_t_33 = __pyx_v_j; /* "quantas/utils/physics/thermodynamics.pyx":152 * else: * KS_v[i,j] = KT_v[i,j] + ( alpha_v[i,j] * alpha_v[i,j] * \ * (factor * KT_v[i,j] * factor * KT_v[i,j]) * \ # <<<<<<<<<<<<<< * V_v[i,j] * T_v[i] / (Cv_v[i,j]/NA) * )/factor */ __pyx_t_34 = __pyx_v_i; /* "quantas/utils/physics/thermodynamics.pyx":153 * KS_v[i,j] = KT_v[i,j] + ( alpha_v[i,j] * alpha_v[i,j] * \ * (factor * KT_v[i,j] * factor * KT_v[i,j]) * \ * V_v[i,j] * T_v[i] / (Cv_v[i,j]/NA) # <<<<<<<<<<<<<< * )/factor * return KS */ __pyx_t_35 = __pyx_v_i; __pyx_t_36 = __pyx_v_j; /* "quantas/utils/physics/thermodynamics.pyx":151 * KS_v[i,j] = KT_v[i,j] * else: * KS_v[i,j] = KT_v[i,j] + ( alpha_v[i,j] * alpha_v[i,j] * \ # <<<<<<<<<<<<<< * (factor * KT_v[i,j] * factor * KT_v[i,j]) * \ * V_v[i,j] * T_v[i] / (Cv_v[i,j]/NA) */ __pyx_t_37 = __pyx_v_i; __pyx_t_38 = __pyx_v_j; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_KS_v.data + __pyx_t_37 * __pyx_v_KS_v.strides[0]) )) + __pyx_t_38)) )) = ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_KT_v.data + __pyx_t_22 * __pyx_v_KT_v.strides[0]) )) + __pyx_t_23)) ))) + (((((((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_alpha_v.data + __pyx_t_24 * __pyx_v_alpha_v.strides[0]) )) + __pyx_t_25)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_alpha_v.data + __pyx_t_26 * __pyx_v_alpha_v.strides[0]) )) + __pyx_t_27)) )))) * (((__pyx_v_factor * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_KT_v.data + __pyx_t_28 * __pyx_v_KT_v.strides[0]) )) + __pyx_t_29)) )))) * __pyx_v_factor) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_KT_v.data + __pyx_t_30 * __pyx_v_KT_v.strides[0]) )) + __pyx_t_31)) ))))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_V_v.data + __pyx_t_32 * __pyx_v_V_v.strides[0]) )) + __pyx_t_33)) )))) * (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_T_v.data) + __pyx_t_34)) )))) / ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_Cv_v.data + __pyx_t_35 * __pyx_v_Cv_v.strides[0]) )) + __pyx_t_36)) ))) / __pyx_v_7quantas_5utils_7physics_14thermodynamics_NA)) / __pyx_v_factor)); } __pyx_L12:; } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define 
likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "quantas/utils/physics/thermodynamics.pyx":146 * cdef double[:,::1] KS_v = KS * * for i in prange(n, nogil=True): # <<<<<<<<<<<<<< * for j in range(m): * if T_v[i] == 0. or Cv_v[i,j] == 0.: */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "quantas/utils/physics/thermodynamics.pyx":155 * V_v[i,j] * T_v[i] / (Cv_v[i,j]/NA) * )/factor * return KS # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_KS); __pyx_r = __pyx_v_KS; goto __pyx_L0; /* "quantas/utils/physics/thermodynamics.pyx":103 * @cython.wraparound(False) * @cython.cdivision(True) * cpdef adiabatic_bulk_modulus(double[::1] T, double[:,::1] V, double[:,::1] KT, # <<<<<<<<<<<<<< * double[:,::1] alpha, double[:,::1] Cv, double pf): * """ */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __PYX_XDEC_MEMVIEW(&__pyx_t_6, 1); __Pyx_AddTraceback("quantas.utils.physics.thermodynamics.adiabatic_bulk_modulus", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_T_v, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_V_v, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_KT_v, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_alpha_v, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_Cv_v, 1); __Pyx_XDECREF(__pyx_v_KS); __PYX_XDEC_MEMVIEW(&__pyx_v_KS_v, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_7quantas_5utils_7physics_14thermodynamics_5adiabatic_bulk_modulus(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_7quantas_5utils_7physics_14thermodynamics_4adiabatic_bulk_modulus[] = "\n This method calculates the adiabatic bulk modulus (KS) according to:\n\n .. 
math::\n\n K_S = K_T + \\frac{\\alpha_V^2 V^2 K_T}{C_V}\n \n\n Attributes\n ----------\n T: ndarray\n 1D array of temperature values (in Kelvin).\n V: ndarray\n 2D array of volume values (in m^3).\n KT: ndarray\n 2D array of isothermal bulk modulus at P(V)-T condition.\n alpha: ndarray\n 2D array of thermal expansion coefficient at P(V)-T condition (in K^-1).\n Cv: ndarray\n 2D array of isochoric heat capacity at P(V)-T condition (in J mol^-1).\n factor: double\n factor to convert the from and to Pa.\n\n Returns\n -------\n ndarray\n Adiabatic bulk modulus (KS).\n "; static PyObject *__pyx_pw_7quantas_5utils_7physics_14thermodynamics_5adiabatic_bulk_modulus(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_T = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_V = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_KT = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_alpha = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_Cv = { 0, 0, { 0 }, { 0 }, { 0 } }; double __pyx_v_pf; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("adiabatic_bulk_modulus (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_T,&__pyx_n_s_V,&__pyx_n_s_KT,&__pyx_n_s_alpha,&__pyx_n_s_Cv,&__pyx_n_s_pf,0}; PyObject* values[6] = {0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_T)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_V)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adiabatic_bulk_modulus", 1, 6, 6, 1); __PYX_ERR(0, 103, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_KT)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adiabatic_bulk_modulus", 1, 6, 6, 2); __PYX_ERR(0, 103, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_alpha)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adiabatic_bulk_modulus", 1, 6, 6, 3); __PYX_ERR(0, 103, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_Cv)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adiabatic_bulk_modulus", 1, 6, 6, 4); __PYX_ERR(0, 103, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 5: if (likely((values[5] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pf)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adiabatic_bulk_modulus", 1, 6, 6, 5); __PYX_ERR(0, 103, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "adiabatic_bulk_modulus") < 0)) __PYX_ERR(0, 103, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 6) { 
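/* positional call path: exactly six arguments (T, V, KT, alpha, Cv, pf) are
   accepted; any other count takes the invalid-argument branch below */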
goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); } __pyx_v_T = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_T.memview)) __PYX_ERR(0, 103, __pyx_L3_error) __pyx_v_V = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_V.memview)) __PYX_ERR(0, 103, __pyx_L3_error) __pyx_v_KT = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_KT.memview)) __PYX_ERR(0, 103, __pyx_L3_error) __pyx_v_alpha = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_alpha.memview)) __PYX_ERR(0, 104, __pyx_L3_error) __pyx_v_Cv = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[4], PyBUF_WRITABLE); if (unlikely(!__pyx_v_Cv.memview)) __PYX_ERR(0, 104, __pyx_L3_error) __pyx_v_pf = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_pf == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 104, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("adiabatic_bulk_modulus", 1, 6, 6, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 103, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("quantas.utils.physics.thermodynamics.adiabatic_bulk_modulus", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_7quantas_5utils_7physics_14thermodynamics_4adiabatic_bulk_modulus(__pyx_self, __pyx_v_T, __pyx_v_V, __pyx_v_KT, __pyx_v_alpha, __pyx_v_Cv, __pyx_v_pf); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_7quantas_5utils_7physics_14thermodynamics_4adiabatic_bulk_modulus(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_T, __Pyx_memviewslice __pyx_v_V, __Pyx_memviewslice __pyx_v_KT, __Pyx_memviewslice __pyx_v_alpha, __Pyx_memviewslice __pyx_v_Cv, double __pyx_v_pf) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("adiabatic_bulk_modulus", 0); __Pyx_XDECREF(__pyx_r); if (unlikely(!__pyx_v_T.memview)) { __Pyx_RaiseUnboundLocalError("T"); __PYX_ERR(0, 103, __pyx_L1_error) } if (unlikely(!__pyx_v_V.memview)) { __Pyx_RaiseUnboundLocalError("V"); __PYX_ERR(0, 103, __pyx_L1_error) } if (unlikely(!__pyx_v_KT.memview)) { __Pyx_RaiseUnboundLocalError("KT"); __PYX_ERR(0, 103, __pyx_L1_error) } if (unlikely(!__pyx_v_alpha.memview)) { __Pyx_RaiseUnboundLocalError("alpha"); __PYX_ERR(0, 103, __pyx_L1_error) } if (unlikely(!__pyx_v_Cv.memview)) { __Pyx_RaiseUnboundLocalError("Cv"); __PYX_ERR(0, 103, __pyx_L1_error) } __pyx_t_1 = __pyx_f_7quantas_5utils_7physics_14thermodynamics_adiabatic_bulk_modulus(__pyx_v_T, __pyx_v_V, __pyx_v_KT, __pyx_v_alpha, __pyx_v_Cv, __pyx_v_pf, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 103, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("quantas.utils.physics.thermodynamics.adiabatic_bulk_modulus", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_T, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_V, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_KT, 1); 
__PYX_XDEC_MEMVIEW(&__pyx_v_alpha, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_Cv, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "quantas/utils/physics/thermodynamics.pyx":161 * @cython.wraparound(False) * @cython.cdivision(True) * cpdef gruneisen_parameter(double[:,::1] V, double[:,::1] KT, # <<<<<<<<<<<<<< * double[:,::1] alpha, double[:,::1] Cv): * """ */ static PyObject *__pyx_pw_7quantas_5utils_7physics_14thermodynamics_7gruneisen_parameter(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_f_7quantas_5utils_7physics_14thermodynamics_gruneisen_parameter(__Pyx_memviewslice __pyx_v_V, __Pyx_memviewslice __pyx_v_KT, __Pyx_memviewslice __pyx_v_alpha, __Pyx_memviewslice __pyx_v_Cv, CYTHON_UNUSED int __pyx_skip_dispatch) { Py_ssize_t __pyx_v_n; Py_ssize_t __pyx_v_m; int __pyx_v_i; int __pyx_v_j; __Pyx_memviewslice __pyx_v_V_v = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_KT_v = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_alpha_v = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_Cv_v = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_v_gamma = NULL; __Pyx_memviewslice __pyx_v_gamma_v = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } }; Py_ssize_t __pyx_t_7; Py_ssize_t __pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; Py_ssize_t __pyx_t_11; int __pyx_t_12; Py_ssize_t __pyx_t_13; Py_ssize_t __pyx_t_14; int __pyx_t_15; Py_ssize_t __pyx_t_16; Py_ssize_t __pyx_t_17; Py_ssize_t __pyx_t_18; Py_ssize_t __pyx_t_19; Py_ssize_t __pyx_t_20; Py_ssize_t __pyx_t_21; Py_ssize_t __pyx_t_22; Py_ssize_t __pyx_t_23; Py_ssize_t __pyx_t_24; Py_ssize_t __pyx_t_25; Py_ssize_t __pyx_t_26; Py_ssize_t __pyx_t_27; __Pyx_RefNannySetupContext("gruneisen_parameter", 0); /* "quantas/utils/physics/thermodynamics.pyx":187 * Grneisen parameters (gamma). 
* """ * cdef Py_ssize_t n = V.shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t m = V.shape[1] * */ __pyx_v_n = (__pyx_v_V.shape[0]); /* "quantas/utils/physics/thermodynamics.pyx":188 * """ * cdef Py_ssize_t n = V.shape[0] * cdef Py_ssize_t m = V.shape[1] # <<<<<<<<<<<<<< * * cdef int i, j */ __pyx_v_m = (__pyx_v_V.shape[1]); /* "quantas/utils/physics/thermodynamics.pyx":191 * * cdef int i, j * cdef double[:,::1] V_v = V # <<<<<<<<<<<<<< * cdef double[:,::1] KT_v = KT * cdef double[:,::1] alpha_v = alpha */ __PYX_INC_MEMVIEW(&__pyx_v_V, 0); __pyx_v_V_v = __pyx_v_V; /* "quantas/utils/physics/thermodynamics.pyx":192 * cdef int i, j * cdef double[:,::1] V_v = V * cdef double[:,::1] KT_v = KT # <<<<<<<<<<<<<< * cdef double[:,::1] alpha_v = alpha * cdef double[:,::1] Cv_v = Cv */ __PYX_INC_MEMVIEW(&__pyx_v_KT, 0); __pyx_v_KT_v = __pyx_v_KT; /* "quantas/utils/physics/thermodynamics.pyx":193 * cdef double[:,::1] V_v = V * cdef double[:,::1] KT_v = KT * cdef double[:,::1] alpha_v = alpha # <<<<<<<<<<<<<< * cdef double[:,::1] Cv_v = Cv * */ __PYX_INC_MEMVIEW(&__pyx_v_alpha, 0); __pyx_v_alpha_v = __pyx_v_alpha; /* "quantas/utils/physics/thermodynamics.pyx":194 * cdef double[:,::1] KT_v = KT * cdef double[:,::1] alpha_v = alpha * cdef double[:,::1] Cv_v = Cv # <<<<<<<<<<<<<< * * gamma = np.zeros( (n,m), dtype=np.float64 ) */ __PYX_INC_MEMVIEW(&__pyx_v_Cv, 0); __pyx_v_Cv_v = __pyx_v_Cv; /* "quantas/utils/physics/thermodynamics.pyx":196 * cdef double[:,::1] Cv_v = Cv * * gamma = np.zeros( (n,m), dtype=np.float64 ) # <<<<<<<<<<<<<< * cdef double[:,::1] gamma_v = gamma * */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_n); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_m); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float64); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 196, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_gamma = __pyx_t_5; __pyx_t_5 = 0; /* 
"quantas/utils/physics/thermodynamics.pyx":197 * * gamma = np.zeros( (n,m), dtype=np.float64 ) * cdef double[:,::1] gamma_v = gamma # <<<<<<<<<<<<<< * * for i in prange(n, nogil=True): */ __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_v_gamma, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 197, __pyx_L1_error) __pyx_v_gamma_v = __pyx_t_6; __pyx_t_6.memview = NULL; __pyx_t_6.data = NULL; /* "quantas/utils/physics/thermodynamics.pyx":199 * cdef double[:,::1] gamma_v = gamma * * for i in prange(n, nogil=True): # <<<<<<<<<<<<<< * for j in range(m): * if Cv_v[i,j] == 0.: */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { __pyx_t_7 = __pyx_v_n; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_9 = (__pyx_t_7 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_9 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_24, __pyx_t_25, __pyx_t_26, __pyx_t_27) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) #endif /* _OPENMP */ for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_9; __pyx_t_8++){ { __pyx_v_i = (int)(0 + 1 * __pyx_t_8); /* Initialize private variables to invalid values */ __pyx_v_j = ((int)0xbad0bad0); /* "quantas/utils/physics/thermodynamics.pyx":200 * * for i in prange(n, nogil=True): * for j in range(m): # <<<<<<<<<<<<<< * if Cv_v[i,j] == 0.: * gamma_v[i,j] = 0. */ __pyx_t_10 = __pyx_v_m; __pyx_t_11 = __pyx_t_10; for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) { __pyx_v_j = __pyx_t_12; /* "quantas/utils/physics/thermodynamics.pyx":201 * for i in prange(n, nogil=True): * for j in range(m): * if Cv_v[i,j] == 0.: # <<<<<<<<<<<<<< * gamma_v[i,j] = 0. * else: */ __pyx_t_13 = __pyx_v_i; __pyx_t_14 = __pyx_v_j; __pyx_t_15 = (((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_Cv_v.data + __pyx_t_13 * __pyx_v_Cv_v.strides[0]) )) + __pyx_t_14)) ))) == 0.) != 0); if (__pyx_t_15) { /* "quantas/utils/physics/thermodynamics.pyx":202 * for j in range(m): * if Cv_v[i,j] == 0.: * gamma_v[i,j] = 0. # <<<<<<<<<<<<<< * else: * gamma_v[i,j] = V_v[i,j] * KT_v[i,j] * alpha_v[i,j] /\ */ __pyx_t_16 = __pyx_v_i; __pyx_t_17 = __pyx_v_j; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_gamma_v.data + __pyx_t_16 * __pyx_v_gamma_v.strides[0]) )) + __pyx_t_17)) )) = 0.; /* "quantas/utils/physics/thermodynamics.pyx":201 * for i in prange(n, nogil=True): * for j in range(m): * if Cv_v[i,j] == 0.: # <<<<<<<<<<<<<< * gamma_v[i,j] = 0. * else: */ goto __pyx_L12; } /* "quantas/utils/physics/thermodynamics.pyx":204 * gamma_v[i,j] = 0. 
* else: * gamma_v[i,j] = V_v[i,j] * KT_v[i,j] * alpha_v[i,j] /\ # <<<<<<<<<<<<<< * (Cv_v[i,j]/NA) * return gamma */ /*else*/ { __pyx_t_18 = __pyx_v_i; __pyx_t_19 = __pyx_v_j; __pyx_t_20 = __pyx_v_i; __pyx_t_21 = __pyx_v_j; __pyx_t_22 = __pyx_v_i; __pyx_t_23 = __pyx_v_j; /* "quantas/utils/physics/thermodynamics.pyx":205 * else: * gamma_v[i,j] = V_v[i,j] * KT_v[i,j] * alpha_v[i,j] /\ * (Cv_v[i,j]/NA) # <<<<<<<<<<<<<< * return gamma */ __pyx_t_24 = __pyx_v_i; __pyx_t_25 = __pyx_v_j; /* "quantas/utils/physics/thermodynamics.pyx":204 * gamma_v[i,j] = 0. * else: * gamma_v[i,j] = V_v[i,j] * KT_v[i,j] * alpha_v[i,j] /\ # <<<<<<<<<<<<<< * (Cv_v[i,j]/NA) * return gamma */ __pyx_t_26 = __pyx_v_i; __pyx_t_27 = __pyx_v_j; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_gamma_v.data + __pyx_t_26 * __pyx_v_gamma_v.strides[0]) )) + __pyx_t_27)) )) = ((((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_V_v.data + __pyx_t_18 * __pyx_v_V_v.strides[0]) )) + __pyx_t_19)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_KT_v.data + __pyx_t_20 * __pyx_v_KT_v.strides[0]) )) + __pyx_t_21)) )))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_alpha_v.data + __pyx_t_22 * __pyx_v_alpha_v.strides[0]) )) + __pyx_t_23)) )))) / ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_Cv_v.data + __pyx_t_24 * __pyx_v_Cv_v.strides[0]) )) + __pyx_t_25)) ))) / __pyx_v_7quantas_5utils_7physics_14thermodynamics_NA)); } __pyx_L12:; } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "quantas/utils/physics/thermodynamics.pyx":199 * cdef double[:,::1] gamma_v = gamma * * for i in prange(n, nogil=True): # <<<<<<<<<<<<<< * for j in range(m): * if Cv_v[i,j] == 0.: */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "quantas/utils/physics/thermodynamics.pyx":206 * gamma_v[i,j] = V_v[i,j] * KT_v[i,j] * alpha_v[i,j] /\ * (Cv_v[i,j]/NA) * return gamma # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_gamma); __pyx_r = __pyx_v_gamma; goto __pyx_L0; /* "quantas/utils/physics/thermodynamics.pyx":161 * @cython.wraparound(False) * @cython.cdivision(True) * cpdef gruneisen_parameter(double[:,::1] V, double[:,::1] KT, # <<<<<<<<<<<<<< * double[:,::1] alpha, double[:,::1] Cv): * """ */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __PYX_XDEC_MEMVIEW(&__pyx_t_6, 1); __Pyx_AddTraceback("quantas.utils.physics.thermodynamics.gruneisen_parameter", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_V_v, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_KT_v, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_alpha_v, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_Cv_v, 1); __Pyx_XDECREF(__pyx_v_gamma); __PYX_XDEC_MEMVIEW(&__pyx_v_gamma_v, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_7quantas_5utils_7physics_14thermodynamics_7gruneisen_parameter(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_7quantas_5utils_7physics_14thermodynamics_6gruneisen_parameter[] = 
"\n This method calculates the the Gr\303\274neisen parameter by\n\n .. math::\n\n \\gamma = \\frac{ \\alpha K_T V }{ C_V }.\n \n\n Attributes\n ----------\n V: ndarray\n 2D array of volume values (in m^3).\n KT: ndarray\n 2D array of isothermal bulk modulus at P(V)-T condition(in Pa).\n alpha: ndarray\n 2D array of thermal expansion coefficient at P(V)-T condition (in K^-1).\n Cv: ndarray\n 2D array of isochoric heat capacity at P(V)-T condition (in J mol^-1).\n\n Returns\n -------\n ndarray\n Gr\303\274neisen parameters (gamma).\n "; static PyObject *__pyx_pw_7quantas_5utils_7physics_14thermodynamics_7gruneisen_parameter(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_V = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_KT = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_alpha = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_Cv = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gruneisen_parameter (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_V,&__pyx_n_s_KT,&__pyx_n_s_alpha,&__pyx_n_s_Cv,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_V)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_KT)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gruneisen_parameter", 1, 4, 4, 1); __PYX_ERR(0, 161, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_alpha)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gruneisen_parameter", 1, 4, 4, 2); __PYX_ERR(0, 161, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_Cv)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gruneisen_parameter", 1, 4, 4, 3); __PYX_ERR(0, 161, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gruneisen_parameter") < 0)) __PYX_ERR(0, 161, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); } __pyx_v_V = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_V.memview)) __PYX_ERR(0, 161, __pyx_L3_error) __pyx_v_KT = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_KT.memview)) __PYX_ERR(0, 161, __pyx_L3_error) __pyx_v_alpha = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_alpha.memview)) __PYX_ERR(0, 162, __pyx_L3_error) __pyx_v_Cv = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[3], 
PyBUF_WRITABLE); if (unlikely(!__pyx_v_Cv.memview)) __PYX_ERR(0, 162, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gruneisen_parameter", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 161, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("quantas.utils.physics.thermodynamics.gruneisen_parameter", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_7quantas_5utils_7physics_14thermodynamics_6gruneisen_parameter(__pyx_self, __pyx_v_V, __pyx_v_KT, __pyx_v_alpha, __pyx_v_Cv); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_7quantas_5utils_7physics_14thermodynamics_6gruneisen_parameter(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_V, __Pyx_memviewslice __pyx_v_KT, __Pyx_memviewslice __pyx_v_alpha, __Pyx_memviewslice __pyx_v_Cv) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("gruneisen_parameter", 0); __Pyx_XDECREF(__pyx_r); if (unlikely(!__pyx_v_V.memview)) { __Pyx_RaiseUnboundLocalError("V"); __PYX_ERR(0, 161, __pyx_L1_error) } if (unlikely(!__pyx_v_KT.memview)) { __Pyx_RaiseUnboundLocalError("KT"); __PYX_ERR(0, 161, __pyx_L1_error) } if (unlikely(!__pyx_v_alpha.memview)) { __Pyx_RaiseUnboundLocalError("alpha"); __PYX_ERR(0, 161, __pyx_L1_error) } if (unlikely(!__pyx_v_Cv.memview)) { __Pyx_RaiseUnboundLocalError("Cv"); __PYX_ERR(0, 161, __pyx_L1_error) } __pyx_t_1 = __pyx_f_7quantas_5utils_7physics_14thermodynamics_gruneisen_parameter(__pyx_v_V, __pyx_v_KT, __pyx_v_alpha, __pyx_v_Cv, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 161, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("quantas.utils.physics.thermodynamics.gruneisen_parameter", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_V, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_KT, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_alpha, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_Cv, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* Python wrapper */ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_shape = 0; Py_ssize_t __pyx_v_itemsize; PyObject *__pyx_v_format = 0; PyObject *__pyx_v_mode = 0; int __pyx_v_allocate_buffer; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)__pyx_n_s_c); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = 
PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_shape = ((PyObject*)values[0]); __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error) __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error) } else { /* "View.MemoryView":123 * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< * * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error) if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error) } __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; 
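/*
 * The array.__cinit__ implementation that follows (View.MemoryView:122-182)
 * is Cython's own memoryview-array boilerplate: it validates shape, itemsize
 * and format, allocates one buffer holding both _shape and _strides
 * (2*ndim Py_ssize_t values), computes contiguous strides for 'c' or
 * 'fortran' order via fill_contig_strides_array, and, when allocate_buffer
 * is true, mallocs self.data and, for an object dtype, fills it with
 * Py_None references.
 */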
__Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char __pyx_v_order; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; char *__pyx_t_7; int __pyx_t_8; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; Py_ssize_t __pyx_t_11; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":129 * cdef PyObject **p * * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 129, __pyx_L1_error) } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error) __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":130 * * self.ndim = <int> len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< * * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 133, __pyx_L1_error) /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ } /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 136, __pyx_L1_error) /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ } /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyBytes_Check(__pyx_v_format); __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":139 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = 
self._format */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ } /* "View.MemoryView":140 * if not isinstance(format, bytes): * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error) __pyx_t_3 = __pyx_v_format; __Pyx_INCREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":141 * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string * self.format = self._format # <<<<<<<<<<<<<< * * */ if (unlikely(__pyx_v_self->_format == Py_None)) { PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); __PYX_ERR(1, 141, __pyx_L1_error) } __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error) __pyx_v_self->format = __pyx_t_7; /* "View.MemoryView":144 * * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); /* "View.MemoryView":145 * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 148, __pyx_L1_error) /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * 
raise MemoryError("unable to allocate shape and strides.") * */ } /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) */ __pyx_t_8 = 0; __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0; for (;;) { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_9; __pyx_v_idx = __pyx_t_8; __pyx_t_8 = (__pyx_t_8 + 1); /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":153 * for idx, dim in enumerate(shape): * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<< * self._shape[idx] = dim * */ __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6); __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 153, __pyx_L1_error) /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ } /* "View.MemoryView":154 * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim # <<<<<<<<<<<<<< * * cdef char order */ (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":158 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":159 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ goto __pyx_L10; } /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error) if (likely(__pyx_t_4)) { /* "View.MemoryView":161 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":162 * elif mode == 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ goto __pyx_L10; } /* "View.MemoryView":164 * self.mode = u'c' * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ /*else*/ { __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 164, __pyx_L1_error) } __pyx_L10:; /* "View.MemoryView":166 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":169 * itemsize, self.ndim, order) * * self.free_data = allocate_buffer # <<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":170 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< * if allocate_buffer: * */ __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 
170, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); if (__pyx_t_4) { /* "View.MemoryView":174 * * * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 176, __pyx_L1_error) /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":179 * * if self.dtype_is_object: * p = <PyObject **> self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = ((PyObject **)__pyx_v_self->data); /* "View.MemoryView":180 * if self.dtype_is_object: * p = <PyObject **> self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 180, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 180, __pyx_L1_error) } __pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize); __pyx_t_9 = __pyx_t_1; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) { __pyx_v_i = __pyx_t_11; /* "View.MemoryView":181 * p = <PyObject **> self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":182 * for i in range(self.len / itemsize): * p[i] = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * @cname('getbuffer') */ Py_INCREF(Py_None); } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ } /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":122 * cdef 
bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_format); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":186 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":188 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ goto __pyx_L3; } /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error) __pyx_t_1 = 
(__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":190 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ } __pyx_L3:; /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 192, __pyx_L1_error) /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ } /* "View.MemoryView":193 * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":194 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":195 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # <<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":196 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":197 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = __pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":198 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = self.itemsize * info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* "View.MemoryView":199 * info.strides = self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # <<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; __pyx_v_info->itemsize = __pyx_t_5; /* "View.MemoryView":200 * info.suboffsets = NULL 
* info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":203 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ goto __pyx_L5; } /* "View.MemoryView":205 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":207 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* Python wrapper */ static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":213 * def __dealloc__(array self): * if self.callback_free_data != NULL: * self.callback_free_data(self.data) # <<<<<<<<<<<<<< * elif self.free_data: * if self.dtype_is_object: */ __pyx_v_self->callback_free_data(__pyx_v_self->data); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ goto __pyx_L3; } /* "View.MemoryView":214 * if 
self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ __pyx_t_1 = (__pyx_v_self->free_data != 0); if (__pyx_t_1) { /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":216 * elif self.free_data: * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< * self._strides, self.ndim, False) * free(self.data) */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ } /* "View.MemoryView":218 * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) * free(self.data) # <<<<<<<<<<<<<< * PyObject_Free(self._shape) * */ free(__pyx_v_self->data); /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ } __pyx_L3:; /* "View.MemoryView":219 * self._strides, self.ndim, False) * free(self.data) * PyObject_Free(self._shape) # <<<<<<<<<<<<<< * * @property */ PyObject_Free(__pyx_v_self->_shape); /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":223 * @property * def memview(self): * return self.get_memview() # <<<<<<<<<<<<<< * * @cname('get_memview') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); 
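/* Editorial note (added): the error/exit code below finishes the generated
 * `memview` property getter.  The helpers that follow -- get_memview(),
 * __len__(), __getattr__(), __getitem__() and __setitem__() -- all route
 * element access through a Python memoryview built over this array with
 *     flags = PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE
 * A minimal Cython-level sketch of that delegation pattern, assuming the
 * documented cython.view.array API (names and shapes are illustrative only,
 * not taken from this file):
 *
 *     from cython cimport view
 *     cdef view.array a = view.array(shape=(3, 4), itemsize=sizeof(int), format="i")
 *     mv = a.memview          # memoryview over a.data, writable and contiguous
 *     a[0, 0] = 7             # __setitem__ forwards to mv[0, 0] = 7
 *     n = len(a)              # __len__ returns self._shape[0], i.e. 3
 */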
__pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("get_memview", 0); /* "View.MemoryView":227 * @cname('get_memview') * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< * return memoryview(self, flags, self.dtype_is_object) * */ __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); /* "View.MemoryView":228 * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* Python wrapper */ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":231 * * def __len__(self): * return self._shape[0] # <<<<<<<<<<<<<< * * def __getattr__(self, attr): */ __pyx_r = 
(__pyx_v_self->_shape[0]); goto __pyx_L0; /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* Python wrapper */ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__getattr__", 0); /* "View.MemoryView":234 * * def __getattr__(self, attr): * return getattr(self.memview, attr) # <<<<<<<<<<<<<< * * def __getitem__(self, item): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* Python wrapper */ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":237 * * def __getitem__(self, item): * return self.memview[item] # <<<<<<<<<<<<<< * * def __setitem__(self, item, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* Python wrapper */ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setitem__", 0); /* "View.MemoryView":240 * * def __setitem__(self, item, value): * self.memview[item] = value # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; 
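/* Editorial note (added): the two generated helpers around this point,
 * __reduce_cython__ and __setstate_cython__, exist only to raise TypeError,
 * because array.__cinit__ allocates raw C memory (self.data) that the default
 * pickle machinery could not recreate.  A hedged Python-level illustration of
 * how that surfaces (the construction call mirrors the documented
 * cython.view.array usage; variable names are illustrative):
 *
 *     import pickle
 *     # a = cython.view.array(shape=(4,), itemsize=8, format="d")
 *     # pickle.dumps(a)  -> TypeError("no default __reduce__ due to non-trivial __cinit__")
 */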
__Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, 
char *__pyx_v_buf) { struct __pyx_array_obj *__pyx_v_result = 0; struct __pyx_array_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("array_cwrapper", 0); /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":249 * * if buf == NULL: * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ goto __pyx_L3; } /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ /*else*/ { __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; /* "View.MemoryView":252 * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) # <<<<<<<<<<<<<< * result.data = buf * */ __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error) 
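/* Editorial note (added): this is the allocate_buffer=False branch of the
 * internal C constructor array_cwrapper (emitted as __pyx_array_new).  When a
 * caller passes a non-NULL `buf`, the array is created without allocating its
 * own storage and result.data is then pointed at the caller's buffer; since
 * free_data was copied from allocate_buffer in __cinit__, __dealloc__ will not
 * free that borrowed memory.  A hedged sketch of the two call patterns this
 * wrapper supports (arguments are illustrative, not taken from this file):
 *
 *     arr = __pyx_array_new(shape, sizeof(double), "d", "c", NULL);     // owns and frees its buffer
 *     arr = __pyx_array_new(shape, sizeof(double), "d", "c", ext_buf);  // borrows ext_buf, never frees it
 */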
__Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error) /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":253 * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) * result.data = buf # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->data = __pyx_v_buf; } __pyx_L3:; /* "View.MemoryView":255 * result.data = buf * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* Python wrapper */ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = 
__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* "View.MemoryView":282 * cdef object name * def __init__(self, name): * self.name = name # <<<<<<<<<<<<<< * def __repr__(self): * return self.name */ __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); __Pyx_GOTREF(__pyx_v_self->name); __Pyx_DECREF(__pyx_v_self->name); __pyx_v_self->name = __pyx_v_name; /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* function exit code */ __pyx_r = 0; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* Python wrapper */ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":284 * self.name = name * def __repr__(self): * return self.name # <<<<<<<<<<<<<< * * cdef generic = Enum("<strided and direct or indirect>") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->name); __pyx_r = __pyx_v_self->name; goto __pyx_L0; /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_v_state = 0; PyObject *__pyx_v__dict = 0; int __pyx_v_use_setstate; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":5 * cdef object _dict * cdef bint use_setstate * state = (self.name,) # <<<<<<<<<<<<<< * _dict = getattr(self, 
'__dict__', None) * if _dict is not None: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_self->name); __Pyx_GIVEREF(__pyx_v_self->name); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); __pyx_v_state = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":6 * cdef bint use_setstate * state = (self.name,) * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< * if _dict is not None: * state += (_dict,) */ __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v__dict = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ __pyx_t_2 = (__pyx_v__dict != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "(tree fragment)":8 * _dict = getattr(self, '__dict__', None) * if _dict is not None: * state += (_dict,) # <<<<<<<<<<<<<< * use_setstate = True * else: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v__dict); __Pyx_GIVEREF(__pyx_v__dict); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "(tree fragment)":9 * if _dict is not None: * state += (_dict,) * use_setstate = True # <<<<<<<<<<<<<< * else: * use_setstate = self.name is not None */ __pyx_v_use_setstate = 1; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ goto __pyx_L3; } /* "(tree fragment)":11 * use_setstate = True * else: * use_setstate = self.name is not None # <<<<<<<<<<<<<< * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state */ /*else*/ { __pyx_t_3 = (__pyx_v_self->name != Py_None); __pyx_v_use_setstate = __pyx_t_3; } __pyx_L3:; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ __pyx_t_3 = (__pyx_v_use_setstate != 0); if (__pyx_t_3) { /* "(tree fragment)":13 * use_setstate = self.name is not None * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ } /* "(tree fragment)":15 * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); __pyx_t_5 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_state); __Pyx_XDECREF(__pyx_v__dict); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":17 * return __pyx_unpickle_Enum, 
(type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error) __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { Py_intptr_t __pyx_v_aligned_p; size_t __pyx_v_offset; void *__pyx_r; int __pyx_t_1; /* "View.MemoryView":300 * cdef void *align_pointer(void *memory, size_t alignment) nogil: * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<< * cdef size_t offset * */ __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); /* "View.MemoryView":304 * * with cython.cdivision(True): * offset = aligned_p % alignment # <<<<<<<<<<<<<< * * if offset > 0: */ __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ __pyx_t_1 = ((__pyx_v_offset > 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":307 * * if offset > 0: * aligned_p += alignment - offset # <<<<<<<<<<<<<< * * return <void *> aligned_p */ __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ } /* "View.MemoryView":309 * aligned_p += alignment - offset * * return <void *> aligned_p # <<<<<<<<<<<<<< * * */ __pyx_r = ((void *)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int 
__pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) if (values[2]) { __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("__cinit__", 0); /* "View.MemoryView":346 * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */ __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj; /* "View.MemoryView":347 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = 
flags # <<<<<<<<<<<<<< * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */ __pyx_v_self->flags = __pyx_v_flags; /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = (__pyx_v_obj != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":349 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */ __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error) /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":351 * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; /* "View.MemoryView":352 * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * global __pyx_memoryview_thread_locks_used */ Py_INCREF(Py_None); /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ } /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ } /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); if (__pyx_t_1) { /* "View.MemoryView":356 * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: */ __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); /* "View.MemoryView":357 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< * if self.lock is NULL: * self.lock = PyThread_allocate_lock() */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); /* "View.MemoryView":355 * * global 
__pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":359 * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if self.lock is NULL: * raise MemoryError */ __pyx_v_self->lock = PyThread_allocate_lock(); /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":361 * self.lock = PyThread_allocate_lock() * if self.lock is NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error) /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ } /* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":364 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */ __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L11_bool_binop_done; } __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_self->dtype_is_object = __pyx_t_1; /* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ goto __pyx_L10; } /* "View.MemoryView":366 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */ /*else*/ { __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L10:; /* "View.MemoryView":368 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */ __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); /* "View.MemoryView":370 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) 
* self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */ __pyx_v_self->typeinfo = NULL; /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyThread_type_lock __pyx_t_6; PyThread_type_lock __pyx_t_7; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":374 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * * cdef int i */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ } /* "View.MemoryView":378 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":379 * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 */ __pyx_t_3 = __pyx_memoryview_thread_locks_used; __pyx_t_4 = __pyx_t_3; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":380 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); if (__pyx_t_2) { /* "View.MemoryView":381 * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( */ 
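/* self.lock was handed out from the preallocated __pyx_memoryview_thread_locks
   pool: decrement the in-use count and, unless this was already the last
   in-use slot, swap it with the last one so the pool stays densely packed and
   the lock can be reused by a later __cinit__. Only locks allocated outside
   the pool (via PyThread_allocate_lock) reach the for-else branch below and
   are released with PyThread_free_lock(). */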
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); /* "View.MemoryView":382 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); if (__pyx_t_2) { /* "View.MemoryView":384 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< * break * else: */ __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]); /* "View.MemoryView":383 * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break */ (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6; (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7; /* "View.MemoryView":382 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ } /* "View.MemoryView":385 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break # <<<<<<<<<<<<<< * else: * PyThread_free_lock(self.lock) */ goto __pyx_L6_break; /* "View.MemoryView":380 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ } } /*else*/ { /* "View.MemoryView":387 * break * else: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); } __pyx_L6_break:; /* "View.MemoryView":378 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":389 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char 
*__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; __Pyx_RefNannySetupContext("get_item_pointer", 0); /* "View.MemoryView":391 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */ __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); /* "View.MemoryView":393 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ __pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 393, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 393, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 393, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 393, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 393, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 393, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 393, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 = (__pyx_t_1 + 1); /* "View.MemoryView":394 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */ __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 394, __pyx_L1_error) __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 394, __pyx_L1_error) __pyx_v_itemp = __pyx_t_7; /* "View.MemoryView":393 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":396 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_itemp; goto __pyx_L0; /* "View.MemoryView":389 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, 
object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_idx); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":399 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* Python wrapper */ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":400 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":401 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "View.MemoryView":400 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ } /* "View.MemoryView":403 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */ __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 403, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 403, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = __pyx_t_5; __pyx_t_5 
= 0; /* "View.MemoryView":406 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 406, __pyx_L1_error) if (__pyx_t_2) { /* "View.MemoryView":407 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":406 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ } /* "View.MemoryView":409 * return memview_slice(self, indices) * else: * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return self.convert_item_to_object(itemp) * */ /*else*/ { __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 409, __pyx_L1_error) __pyx_v_itemp = __pyx_t_6; /* "View.MemoryView":410 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 410, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":399 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":412 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* Python wrapper */ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_RefNannySetupContext("__setitem__", 0); 
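/* __setitem__ dispatch: reject writes to read-only views, normalise the index
   with _unellipsify(), then either assign into a sub-slice (from another
   memoryview via setitem_slice_assignment, or by broadcasting a scalar via
   setitem_slice_assign_scalar) or write a single element through
   setitem_indexed(). The index is INCREF'd here because _unellipsify()
   rebinds it further down. */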
__Pyx_INCREF(__pyx_v_index); /* "View.MemoryView":413 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ __pyx_t_1 = (__pyx_v_self->view.readonly != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":414 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 414, __pyx_L1_error) /* "View.MemoryView":413 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ } /* "View.MemoryView":416 * raise TypeError("Cannot assign to read-only memoryview") * * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */ __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (likely(__pyx_t_2 != Py_None)) { PyObject* sequence = __pyx_t_2; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 416, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 416, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_3; __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":418 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 418, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":419 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 419, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_obj = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":420 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 420, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":421 * obj = self.is_slice(value) * if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */ __pyx_t_2 = 
__Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 421, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 421, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":420 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ goto __pyx_L5; } /* "View.MemoryView":423 * self.setitem_slice_assignment(self[index], obj) * else: * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */ /*else*/ { __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 423, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 423, __pyx_L1_error) __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L5:; /* "View.MemoryView":418 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ goto __pyx_L4; } /* "View.MemoryView":425 * self.setitem_slice_assign_scalar(self[index], value) * else: * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L4:; /* "View.MemoryView":412 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":427 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; __Pyx_RefNannySetupContext("is_slice", 0); __Pyx_INCREF(__pyx_v_obj); /* "View.MemoryView":428 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = 
memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":429 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "View.MemoryView":430 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 430, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":431 * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */ __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 431, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":430 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 430, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 430, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":429 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; /* "View.MemoryView":432 * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 432, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":433 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; 
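/* memoryview(obj, ...) raised TypeError, i.e. the value does not expose the
   buffer protocol: is_slice() returns None and __setitem__ falls back to
   scalar assignment for this value. */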
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "View.MemoryView":429 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L7_except_return:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L9_try_end:; } /* "View.MemoryView":428 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ } /* "View.MemoryView":435 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto __pyx_L0; /* "View.MemoryView":427 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_obj); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":437 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { __Pyx_memviewslice __pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_src_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); /* "View.MemoryView":441 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 441, __pyx_L1_error) /* "View.MemoryView":442 * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< * src.ndim, dst.ndim, self.dtype_is_object) * */ if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 442, __pyx_L1_error) /* "View.MemoryView":443 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 443, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 443, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = 
__Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 443, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 443, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":441 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ __pyx_t_4 = __pyx_memoryview_copy_contents((__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice))[0]), (__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice))[0]), __pyx_t_2, __pyx_t_3, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 441, __pyx_L1_error) /* "View.MemoryView":437 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":445 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { int __pyx_v_array[0x80]; void *__pyx_v_tmp; void *__pyx_v_item; __Pyx_memviewslice *__pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_tmp_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; char const *__pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); /* "View.MemoryView":447 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): * cdef int array[128] * cdef void *tmp = NULL # <<<<<<<<<<<<<< * cdef void *item * */ __pyx_v_tmp = NULL; /* "View.MemoryView":452 * cdef __Pyx_memviewslice *dst_slice * cdef __Pyx_memviewslice tmp_slice * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< * * if <size_t>self.view.itemsize > sizeof(array): */ __pyx_v_dst_slice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); /* "View.MemoryView":454 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ __pyx_t_1 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); if (__pyx_t_1) { /* "View.MemoryView":455 * * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":456 * if <size_t>self.view.itemsize > sizeof(array): * tmp = 
PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_1 = ((__pyx_v_tmp == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":457 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); __PYX_ERR(1, 457, __pyx_L1_error) /* "View.MemoryView":456 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ } /* "View.MemoryView":458 * if tmp == NULL: * raise MemoryError * item = tmp # <<<<<<<<<<<<<< * else: * item = <void *> array */ __pyx_v_item = __pyx_v_tmp; /* "View.MemoryView":454 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ goto __pyx_L3; } /* "View.MemoryView":460 * item = tmp * else: * item = <void *> array # <<<<<<<<<<<<<< * * try: */ /*else*/ { __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":462 * item = <void *> array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value */ /*try:*/ { /* "View.MemoryView":463 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":464 * try: * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< * else: * self.assign_item_from_object(<char *> item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); /* "View.MemoryView":463 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ goto __pyx_L8; } /* "View.MemoryView":466 * (<PyObject **> item)[0] = <PyObject *> value * else: * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< * * */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 466, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L8:; /* "View.MemoryView":470 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":471 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_2 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 471, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":470 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ } /* "View.MemoryView":472 * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * 
finally: */ __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":475 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } __pyx_L6_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8) < 0)) __Pyx_ErrFetch(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __pyx_t_3 = __pyx_lineno; __pyx_t_4 = __pyx_clineno; __pyx_t_5 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_9); __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11); } __Pyx_XGIVEREF(__pyx_t_6); __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_ErrRestore(__pyx_t_6, __pyx_t_7, __pyx_t_8); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_lineno = __pyx_t_3; __pyx_clineno = __pyx_t_4; __pyx_filename = __pyx_t_5; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":445 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":477 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations char *__pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("setitem_indexed", 0); /* "View.MemoryView":478 * * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< * self.assign_item_from_object(itemp, value) * */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 478, __pyx_L1_error) __pyx_v_itemp = __pyx_t_1; /* "View.MemoryView":479 * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 479, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":477 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":481 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_v_struct = NULL; PyObject *__pyx_v_bytesitem = 0; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; int __pyx_t_11; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":484 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef bytes bytesitem * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 484, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":487 * cdef bytes bytesitem * * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< * try: * result = struct.unpack(self.view.format, bytesitem) */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":488 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":489 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 489, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 489, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { 
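/* Fast path: struct.unpack is a plain Python function here, so it is invoked
   through __Pyx_PyFunction_FastCall with (format, bytesitem) instead of
   building an intermediate argument tuple. */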
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 489, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 489, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 489, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 489, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":488 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ } /* "View.MemoryView":493 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ /*else:*/ { __pyx_t_10 = strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if (__pyx_t_11) { /* "View.MemoryView":494 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 494, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; /* "View.MemoryView":493 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ } /* "View.MemoryView":495 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "View.MemoryView":490 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 490, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; 
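/* The pending exception was fetched and compared against struct.error;
   restore it, and if it matched, the handler below replaces it with
   ValueError("Unable to convert item to object"). Any other exception is
   propagated to the caller via __pyx_L5_except_error. */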
__Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9); __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0; if (__pyx_t_8) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 490, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_1); /* "View.MemoryView":491 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 491, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 491, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "View.MemoryView":488 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":481 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":497 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject *__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; char *__pyx_t_11; char *__pyx_t_12; char *__pyx_t_13; char *__pyx_t_14; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":500 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 500, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":505 * cdef Py_ssize_t i * * 
if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":506 * * if isinstance(value, tuple): * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 506, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 506, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 506, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 506, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 506, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 506, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 506, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":505 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ goto __pyx_L3; } /* "View.MemoryView":508 * bytesvalue = struct.pack(self.view.format, *value) * else: * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * for i, c in enumerate(bytesvalue): */ /*else*/ { __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 508, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 508, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 508, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 508, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { 
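/* Generic call fallback for struct.pack(self.view.format, value): when neither
   the CPython function fast-call path nor the C-function fast-call path above
   applies, pack both arguments into a fresh tuple (shifted by one slot if a
   bound-method self was split off) and invoke pack through the normal Python
   calling convention. */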
__pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 508, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 508, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 508, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "View.MemoryView":510 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = 0; if (unlikely(__pyx_v_bytesvalue == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); __PYX_ERR(1, 510, __pyx_L1_error) } __Pyx_INCREF(__pyx_v_bytesvalue); __pyx_t_10 = __pyx_v_bytesvalue; __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10); __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { __pyx_t_11 = __pyx_t_14; __pyx_v_c = (__pyx_t_11[0]); /* "View.MemoryView":511 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ __pyx_v_i = __pyx_t_9; /* "View.MemoryView":510 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = (__pyx_t_9 + 1); /* "View.MemoryView":511 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; } __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "View.MemoryView":497 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":514 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; char *__pyx_t_5; void *__pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":515 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = (__pyx_v_self->view.readonly != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":516 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 516, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 516, __pyx_L1_error) /* "View.MemoryView":515 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ } /* "View.MemoryView":518 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0); if (__pyx_t_1) { /* "View.MemoryView":519 * * if flags & PyBUF_ND: * info.shape = self.view.shape # <<<<<<<<<<<<<< * else: * info.shape = NULL */ __pyx_t_4 = __pyx_v_self->view.shape; __pyx_v_info->shape = __pyx_t_4; /* "View.MemoryView":518 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ goto __pyx_L6; } /* "View.MemoryView":521 * info.shape = self.view.shape * else: * info.shape = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ /*else*/ { __pyx_v_info->shape = NULL; } __pyx_L6:; /* "View.MemoryView":523 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":524 * * if flags & PyBUF_STRIDES: * info.strides = self.view.strides # <<<<<<<<<<<<<< * else: * info.strides = NULL */ __pyx_t_4 = __pyx_v_self->view.strides; __pyx_v_info->strides = __pyx_t_4; /* "View.MemoryView":523 * info.shape = NULL * * if flags & PyBUF_STRIDES: # 
<<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ goto __pyx_L7; } /* "View.MemoryView":526 * info.strides = self.view.strides * else: * info.strides = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_INDIRECT: */ /*else*/ { __pyx_v_info->strides = NULL; } __pyx_L7:; /* "View.MemoryView":528 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); if (__pyx_t_1) { /* "View.MemoryView":529 * * if flags & PyBUF_INDIRECT: * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< * else: * info.suboffsets = NULL */ __pyx_t_4 = __pyx_v_self->view.suboffsets; __pyx_v_info->suboffsets = __pyx_t_4; /* "View.MemoryView":528 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ goto __pyx_L8; } /* "View.MemoryView":531 * info.suboffsets = self.view.suboffsets * else: * info.suboffsets = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ /*else*/ { __pyx_v_info->suboffsets = NULL; } __pyx_L8:; /* "View.MemoryView":533 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":534 * * if flags & PyBUF_FORMAT: * info.format = self.view.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_5 = __pyx_v_self->view.format; __pyx_v_info->format = __pyx_t_5; /* "View.MemoryView":533 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ goto __pyx_L9; } /* "View.MemoryView":536 * info.format = self.view.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.buf = self.view.buf */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L9:; /* "View.MemoryView":538 * info.format = NULL * * info.buf = self.view.buf # <<<<<<<<<<<<<< * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize */ __pyx_t_6 = __pyx_v_self->view.buf; __pyx_v_info->buf = __pyx_t_6; /* "View.MemoryView":539 * * info.buf = self.view.buf * info.ndim = self.view.ndim # <<<<<<<<<<<<<< * info.itemsize = self.view.itemsize * info.len = self.view.len */ __pyx_t_7 = __pyx_v_self->view.ndim; __pyx_v_info->ndim = __pyx_t_7; /* "View.MemoryView":540 * info.buf = self.view.buf * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< * info.len = self.view.len * info.readonly = self.view.readonly */ __pyx_t_8 = __pyx_v_self->view.itemsize; __pyx_v_info->itemsize = __pyx_t_8; /* "View.MemoryView":541 * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize * info.len = self.view.len # <<<<<<<<<<<<<< * info.readonly = self.view.readonly * info.obj = self */ __pyx_t_8 = __pyx_v_self->view.len; __pyx_v_info->len = __pyx_t_8; /* "View.MemoryView":542 * info.itemsize = self.view.itemsize * info.len = self.view.len * info.readonly = self.view.readonly # <<<<<<<<<<<<<< * info.obj = self * */ __pyx_t_1 = __pyx_v_self->view.readonly; __pyx_v_info->readonly = __pyx_t_1; /* "View.MemoryView":543 * info.len = self.view.len * info.readonly = self.view.readonly * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":514 * * 
@cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":549 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":550 * @property * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< * transpose_memslice(&result.from_slice) * return result */ __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 550, __pyx_L1_error) __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":551 * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< * return result * */ __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 551, __pyx_L1_error) /* "View.MemoryView":552 * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) * return result # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":549 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":555 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* Python wrapper */ static 
PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":556 * @property * def base(self): * return self.obj # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->obj); __pyx_r = __pyx_v_self->obj; goto __pyx_L0; /* "View.MemoryView":555 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":559 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_length; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":560 * @property * def shape(self): * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 560, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_length = (__pyx_t_2[0]); __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 560, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 560, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 560, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":559 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; 
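/* On error the result slot is cleared to NULL; control then falls through to
   the shared exit label, which hands the reference back to the caller and
   closes the RefNanny context. */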
__pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":563 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_stride; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":564 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":566 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 566, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 566, __pyx_L1_error) /* "View.MemoryView":564 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ } /* "View.MemoryView":568 * raise ValueError("Buffer view does not expose strides") * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 568, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_v_stride = (__pyx_t_3[0]); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 568, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 568, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 568, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "View.MemoryView":563 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":571 * * @property * def 
suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; Py_ssize_t *__pyx_t_6; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":572 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":573 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 573, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__12, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 573, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":572 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ } /* "View.MemoryView":575 * return (-1,) * self.view.ndim * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 575, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { __pyx_t_4 = __pyx_t_6; __pyx_v_suboffset = (__pyx_t_4[0]); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 575, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 575, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 575, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":571 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* 
"View.MemoryView":578 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":579 * @property * def ndim(self): * return self.view.ndim # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":578 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":582 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":583 * @property * def itemsize(self): * return self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":582 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":586 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject 
*__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":587 * @property * def nbytes(self): * return self.size * self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":586 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":590 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_v_result = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":591 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ __pyx_t_1 = (__pyx_v_self->_size == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":592 * def size(self): * if self._size is None: * result = 1 # <<<<<<<<<<<<<< * * for length in self.view.shape[:self.view.ndim]: */ __Pyx_INCREF(__pyx_int_1); __pyx_v_result = __pyx_int_1; /* "View.MemoryView":594 * result = 1 * * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< * result *= length * */ __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { 
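/* Accumulate the element count: multiply the running Python-int result by each
   extent in view.shape[:ndim]. The product is cached in self._size below so
   the property is only computed once per memoryview. */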
__pyx_t_3 = __pyx_t_5; __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 594, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); __pyx_t_6 = 0; /* "View.MemoryView":595 * * for length in self.view.shape[:self.view.ndim]: * result *= length # <<<<<<<<<<<<<< * * self._size = result */ __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 595, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); __pyx_t_6 = 0; } /* "View.MemoryView":597 * result *= length * * self._size = result # <<<<<<<<<<<<<< * * return self._size */ __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); __Pyx_GOTREF(__pyx_v_self->_size); __Pyx_DECREF(__pyx_v_self->_size); __pyx_v_self->_size = __pyx_v_result; /* "View.MemoryView":591 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ } /* "View.MemoryView":599 * self._size = result * * return self._size # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->_size); __pyx_r = __pyx_v_self->_size; goto __pyx_L0; /* "View.MemoryView":590 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":601 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* Python wrapper */ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":602 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":603 * def __len__(self): * if self.view.ndim >= 1: * return self.view.shape[0] # <<<<<<<<<<<<<< * * return 0 */ __pyx_r = (__pyx_v_self->view.shape[0]); goto __pyx_L0; /* "View.MemoryView":602 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ } /* "View.MemoryView":605 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def __repr__(self): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":601 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":607 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static 
PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":608 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":609 * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 609, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); /* "View.MemoryView":608 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":607 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":611 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj 
*)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":612 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":611 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":615 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("is_c_contig", 0); /* "View.MemoryView":618 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'C', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":619 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ 
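/* Contiguity test: __pyx_memviewslice_is_contig walks the slice's shape and
   strides and reports whether the layout matches C order ('C') across ndim
   dimensions; the int result is boxed as a Python bool for the return value. */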
__Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 619, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":615 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":621 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":624 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'F', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":625 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 625, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":621 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":627 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":629 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":631 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":632 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 632, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":637 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 637, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":627 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":639 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy_fortran", 0); /* 
"View.MemoryView":641 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":643 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); /* "View.MemoryView":644 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 644, __pyx_L1_error) __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":649 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 649, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":639 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, 
__pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":653 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":654 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 654, __pyx_L1_error) 
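/* memoryview_cwrapper builds the Python-level constructor call: box the int
   flags and the bint dtype_is_object, pack them with the exporting object into
   a 3-tuple, and call the memoryview type; the typeinfo pointer is attached to
   the new instance afterwards. */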
__Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 654, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 654, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 654, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":655 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* "View.MemoryView":656 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":653 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":659 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":660 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":659 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":662 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int 
__pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":667 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":668 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 668, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; /* "View.MemoryView":667 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ goto __pyx_L3; } /* "View.MemoryView":670 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ /*else*/ { __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":672 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":673 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":674 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":675 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 675, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 675, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 675, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = 
PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 675, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":676 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":677 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":678 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 678, __pyx_L1_error) __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 678, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__15); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 678, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":679 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; /* "View.MemoryView":677 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ goto __pyx_L7; } /* "View.MemoryView":681 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ /*else*/ { __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__15); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 681, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":682 * else: * result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; /* "View.MemoryView":676 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ goto __pyx_L6; } /* "View.MemoryView":684 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ /*else*/ { __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } 
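/* Note: the PySlice_Check above and the PyIndex_Check below together evaluate
   the short-circuited Cython condition
   `not isinstance(item, slice) and not PyIndex_Check(item)`: only slice
   objects and integer-like indices are accepted here; anything else falls
   through to the TypeError raised just after. */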
__pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":685 * else: * if not isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 685, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 685, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_Raise(__pyx_t_11, 0, 0, 0); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __PYX_ERR(1, 685, __pyx_L1_error) /* "View.MemoryView":684 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ } /* "View.MemoryView":687 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if (!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":688 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 688, __pyx_L1_error) } __pyx_L6:; /* "View.MemoryView":675 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":690 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 690, __pyx_L1_error) __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":691 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":692 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 692, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__15); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":691 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ } /* "View.MemoryView":694 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 694, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 694, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 694, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 694, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject*)__pyx_t_11); __pyx_t_11 = 0; goto __pyx_L0; /* "View.MemoryView":662 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_tup); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XDECREF(__pyx_v_item); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":696 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); /* "View.MemoryView":697 * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") */ __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_suboffset = (__pyx_t_1[0]); /* "View.MemoryView":698 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in 
suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":699 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 699, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 699, __pyx_L1_error) /* "View.MemoryView":698 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ } } /* "View.MemoryView":696 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":706 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { int __pyx_v_new_ndim; int __pyx_v_suboffset_dim; int __pyx_v_dim; __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; __Pyx_memviewslice *__pyx_v_p_src; struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; __Pyx_memviewslice *__pyx_v_p_dst; int *__pyx_v_p_suboffset_dim; Py_ssize_t __pyx_v_start; Py_ssize_t __pyx_v_stop; Py_ssize_t __pyx_v_step; int __pyx_v_have_start; int __pyx_v_have_stop; int __pyx_v_have_step; PyObject *__pyx_v_index = NULL; struct __pyx_memoryview_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int __pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; __Pyx_RefNannySetupContext("memview_slice", 0); /* "View.MemoryView":707 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ __pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":714 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); /* "View.MemoryView":718 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(1, 718, __pyx_L1_error) } } #endif /* "View.MemoryView":720 * assert memview.view.ndim > 0 * * 
if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":721 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 721, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":722 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); /* "View.MemoryView":720 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ goto __pyx_L3; } /* "View.MemoryView":724 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":725 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":731 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":732 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":737 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = &suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":738 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":742 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 742, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 742, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 742, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 742, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } else { if (__pyx_t_7 >= 
PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 742, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 742, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 742, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":743 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":747 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * False) */ __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 747, __pyx_L1_error) /* "View.MemoryView":744 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 744, __pyx_L1_error) /* "View.MemoryView":743 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ goto __pyx_L6; } /* "View.MemoryView":750 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":751 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /* "View.MemoryView":752 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":753 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * new_ndim += 1 * else: */ (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; /* "View.MemoryView":754 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * start = index.start or 0 */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); /* "View.MemoryView":750 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ 
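/* Note: an index of None inserts a new axis into the result (shape 1, stride
   0, no suboffset) without consuming a dimension of the source slice, which
   is in effect the same behaviour as numpy.newaxis. */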
goto __pyx_L6; } /* "View.MemoryView":756 * new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ /*else*/ { __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 756, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 756, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 756, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L7_bool_binop_done; } __pyx_t_10 = 0; __pyx_L7_bool_binop_done:; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":757 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 757, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 757, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 757, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = 0; __pyx_L9_bool_binop_done:; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":758 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 758, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 758, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 758, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = 0; __pyx_L11_bool_binop_done:; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":760 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":761 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":762 * have_start = index.start is not None * have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if 
(unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":764 * have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 764, __pyx_L1_error) /* "View.MemoryView":770 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; /* "View.MemoryView":742 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":772 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":773 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":774 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 774, __pyx_L1_error) } /* "View.MemoryView":775 * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 775, __pyx_L1_error) } /* "View.MemoryView":773 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 773, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 773, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":772 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ } /* "View.MemoryView":778 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ /*else*/ { 
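/* Note: for a plain memoryview (not a _memoryviewslice) there are no per-item
   conversion callbacks to forward, so the new slice is created with NULL
   to_object_func and to_dtype_func and only inherits dtype_is_object from the
   source view. */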
__Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":779 * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 778, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":778 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 778, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":706 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":803 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":823 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":825 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":826 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":825 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ } /* "View.MemoryView":827 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":828 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 828, __pyx_L1_error) /* "View.MemoryView":827 * if start < 0: * start += shape 
* if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ } /* "View.MemoryView":823 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ goto __pyx_L3; } /* "View.MemoryView":831 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ /*else*/ { __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":833 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":834 * * if have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 834, __pyx_L1_error) /* "View.MemoryView":833 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ } /* "View.MemoryView":837 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":838 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":839 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":840 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":841 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; /* "View.MemoryView":840 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ } /* "View.MemoryView":838 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ goto __pyx_L12; } /* "View.MemoryView":842 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":843 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":844 * elif start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":843 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L14; } /* "View.MemoryView":846 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ /*else*/ { __pyx_v_start = __pyx_v_shape; } 
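/* Note: the clamping above follows Python slice semantics. A negative start
   is first shifted by `shape`; a value that is still out of range is clamped
   to 0 on the low side and, on the high side, to shape - 1 for a negative
   step or to shape for a positive step. */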
__pyx_L14:; /* "View.MemoryView":842 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ } __pyx_L12:; /* "View.MemoryView":837 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ goto __pyx_L11; } /* "View.MemoryView":848 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":849 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":848 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L15; } /* "View.MemoryView":851 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ /*else*/ { __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":853 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":854 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":855 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":856 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":857 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; /* "View.MemoryView":856 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ } /* "View.MemoryView":854 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ goto __pyx_L17; } /* "View.MemoryView":858 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":859 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; /* "View.MemoryView":858 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ } __pyx_L17:; /* "View.MemoryView":853 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ goto __pyx_L16; } /* "View.MemoryView":861 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":862 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1L; /* "View.MemoryView":861 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ goto __pyx_L19; } /* "View.MemoryView":864 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ /*else*/ { __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":866 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":867 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; /* "View.MemoryView":866 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ } /* "View.MemoryView":871 * * with 
cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":873 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":874 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); /* "View.MemoryView":873 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ } /* "View.MemoryView":876 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":877 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; /* "View.MemoryView":876 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ } /* "View.MemoryView":880 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":881 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":882 * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":885 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":886 * * if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); /* "View.MemoryView":885 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ goto __pyx_L23; } /* "View.MemoryView":888 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ /*else*/ { __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":890 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":891 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":892 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":893 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, 
"All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); /* "View.MemoryView":892 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ goto __pyx_L26; } /* "View.MemoryView":895 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ /*else*/ { /* "View.MemoryView":896 * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< * else: * suboffset_dim[0] = new_ndim */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 895, __pyx_L1_error) } __pyx_L26:; /* "View.MemoryView":891 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ goto __pyx_L25; } /* "View.MemoryView":898 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ /*else*/ { (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; } __pyx_L25:; /* "View.MemoryView":890 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ } /* "View.MemoryView":900 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":803 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":906 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_RefNannySetupContext("pybuffer_index", 0); /* "View.MemoryView":908 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1L; /* "View.MemoryView":909 * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":912 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * 
stride = itemsize */ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":913 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 913, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 913, __pyx_L1_error) } __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize); /* "View.MemoryView":914 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: * shape = view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; /* "View.MemoryView":912 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ goto __pyx_L3; } /* "View.MemoryView":916 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ /*else*/ { __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":917 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":918 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":919 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); /* "View.MemoryView":918 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ } } __pyx_L3:; /* "View.MemoryView":921 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":922 * * if index < 0: * index += view.shape[dim] # <<<<<<<<<<<<<< * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":923 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":924 * index += view.shape[dim] * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 924, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 924, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 924, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); 
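/* Note: pybuffer_index mirrors Python indexing. A negative index has already
   been wrapped once by adding view.shape[dim]; the IndexError being
   constructed here reports an index that is still negative after that wrap,
   and the symmetric check for index >= shape follows below. */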
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 924, __pyx_L1_error) /* "View.MemoryView":923 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":921 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ } /* "View.MemoryView":926 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":927 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * resultp = bufp + index * stride */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 927, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 927, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 927, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 927, __pyx_L1_error) /* "View.MemoryView":926 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":929 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":930 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":931 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); /* "View.MemoryView":930 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ } /* "View.MemoryView":933 * resultp = (<char **> resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":906 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":939 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ 
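/* Note: transpose_memslice reverses the view in place by swapping shape[i]
   with shape[ndim - 1 - i] and strides[i] with strides[ndim - 1 - i] for the
   first ndim / 2 axes, so no element data is copied. Dimensions with a
   non-negative suboffset (indirect dimensions) cannot be transposed and raise
   ValueError instead. */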
static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; Py_ssize_t *__pyx_t_2; long __pyx_t_3; long __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; /* "View.MemoryView":940 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":942 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":943 * * cdef Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":947 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] */ __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2); __pyx_t_4 = __pyx_t_3; for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":948 * cdef int i, j * for i in range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":949 * for i in range(ndim / 2): * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; /* "View.MemoryView":950 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":952 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_8) { } else { __pyx_t_7 = __pyx_t_8; goto __pyx_L6_bool_binop_done; } __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_7 = __pyx_t_8; __pyx_L6_bool_binop_done:; if (__pyx_t_7) { /* "View.MemoryView":953 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 953, __pyx_L1_error) /* "View.MemoryView":952 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect 
dimensions") * */ } } /* "View.MemoryView":955 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto __pyx_L0; /* "View.MemoryView":939 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":972 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* Python wrapper */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":973 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":972 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":975 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":976 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":977 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 977, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":976 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ } /* "View.MemoryView":979 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef 
assign_item_from_object(self, char *itemp, object value): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 979, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":975 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":981 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":982 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":983 * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, itemp, value) */ __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 983, __pyx_L1_error) /* "View.MemoryView":982 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ goto __pyx_L3; } /* "View.MemoryView":985 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< * * @property */ /*else*/ { __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 985, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "View.MemoryView":981 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":988 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject 
*__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":989 * @property * def base(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":988 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject 
*__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":995 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":1003 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":1004 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "View.MemoryView":1003 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ } /* "View.MemoryView":1009 * * * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) 
__PYX_ERR(1, 1009, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1009, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1009, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1011 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":1012 * * result.from_slice = memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":1014 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1014, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); __Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":1015 * * result.from_object = (<memoryview> memviewslice.memview).base * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":1017 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":1018 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* "View.MemoryView":1019 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":1020 * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":1021 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: */ Py_INCREF(Py_None); /* "View.MemoryView":1023 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # 
<<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); if (__pyx_t_1) { /* "View.MemoryView":1024 * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< * else: * result.flags = PyBUF_RECORDS_RO */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":1023 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ goto __pyx_L4; } /* "View.MemoryView":1026 * result.flags = PyBUF_RECORDS * else: * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ /*else*/ { __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; } __pyx_L4:; /* "View.MemoryView":1028 * result.flags = PyBUF_RECORDS_RO * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":1029 * * result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* "View.MemoryView":1032 * * * result.view.suboffsets = NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":1033 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":1034 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1035 * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":1036 * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L6_break; /* "View.MemoryView":1034 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ } } __pyx_L6_break:; /* "View.MemoryView":1038 * break * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* "View.MemoryView":1039 * * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: # 
<<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1039, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1040 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1040, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1040, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1040, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; } /* "View.MemoryView":1042 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1043 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1045 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":995 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1048 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); /* "View.MemoryView":1051 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1052 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # <<<<<<<<<<<<<< * return 
&obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1052, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1053 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; /* "View.MemoryView":1051 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ } /* "View.MemoryView":1055 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1056 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1048 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_WriteUnraisable("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1059 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; Py_ssize_t __pyx_t_5; __Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1063 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = __pyx_v_memview->view.shape; __pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1064 * * shape = memview.view.shape * strides = memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1065 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1067 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1068 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); /* "View.MemoryView":1070 * 
dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_dim = __pyx_t_4; /* "View.MemoryView":1071 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1072 * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1073 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_5 = -1L; } (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; } /* "View.MemoryView":1059 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1076 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("memoryview_copy", 0); /* "View.MemoryView":1079 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1080 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1080, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1076 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1083 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. 
*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); /* "View.MemoryView":1090 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1091 * * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1092 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; /* "View.MemoryView":1090 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ goto __pyx_L3; } /* "View.MemoryView":1094 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ /*else*/ { __pyx_v_to_object_func = NULL; /* "View.MemoryView":1095 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1097 * to_dtype_func = NULL * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1099 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1097, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":1083 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1105 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; int __pyx_t_1; /* "View.MemoryView":1106 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1107 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; /* "View.MemoryView":1106 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ } /* "View.MemoryView":1109 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ /*else*/ { __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1105 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1112 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. */ static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_c_stride; Py_ssize_t __pyx_v_f_stride; char __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1117 * """ * cdef int i * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t f_stride = 0 * */ __pyx_v_c_stride = 0; /* "View.MemoryView":1118 * cdef int i * cdef Py_ssize_t c_stride = 0 * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_f_stride = 0; /* "View.MemoryView":1120 * cdef Py_ssize_t f_stride = 0 * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1121 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1122 * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1123 * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * for i in range(ndim): */ goto __pyx_L4_break; /* "View.MemoryView":1121 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ } } __pyx_L4_break:; /* "View.MemoryView":1125 * break * * for i in range(ndim): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] */ __pyx_t_1 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_1; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1126 * * for i in range(ndim): * if 
mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1127 * for i in range(ndim): * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1128 * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): */ goto __pyx_L7_break; /* "View.MemoryView":1126 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ } } __pyx_L7_break:; /* "View.MemoryView":1130 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1131 * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): * return 'C' # <<<<<<<<<<<<<< * else: * return 'F' */ __pyx_r = 'C'; goto __pyx_L0; /* "View.MemoryView":1130 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ } /* "View.MemoryView":1133 * return 'C' * else: * return 'F' # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ /*else*/ { __pyx_r = 'F'; goto __pyx_L0; } /* "View.MemoryView":1112 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1136 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; Py_ssize_t __pyx_v_dst_extent; Py_ssize_t __pyx_v_src_stride; Py_ssize_t __pyx_v_dst_stride; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; /* "View.MemoryView":1143 * * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] */ __pyx_v_src_extent = (__pyx_v_src_shape[0]); /* "View.MemoryView":1144 * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] */ __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); /* "View.MemoryView":1145 * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_stride = dst_strides[0] * */ __pyx_v_src_stride = (__pyx_v_src_strides[0]); /* "View.MemoryView":1146 * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); /* 
"View.MemoryView":1148 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1149 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } /* "View.MemoryView":1150 * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize * dst_extent) * else: */ __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); if (__pyx_t_2) { __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); } __pyx_t_3 = (__pyx_t_2 != 0); __pyx_t_1 = __pyx_t_3; __pyx_L5_bool_binop_done:; /* "View.MemoryView":1149 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ if (__pyx_t_1) { /* "View.MemoryView":1151 * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent))); /* "View.MemoryView":1149 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ goto __pyx_L4; } /* "View.MemoryView":1153 * memcpy(dst_data, src_data, itemsize * dst_extent) * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize) * src_data += src_stride */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1154 * else: * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< * src_data += src_stride * dst_data += dst_stride */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize)); /* "View.MemoryView":1155 * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * else: */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1156 * memcpy(dst_data, src_data, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L4:; /* "View.MemoryView":1148 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ goto __pyx_L3; } /* "View.MemoryView":1158 * dst_data += dst_stride * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * _copy_strided_to_strided(src_data, src_strides + 1, * dst_data, dst_strides + 1, */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = 
__pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1159 * else: * for i in range(dst_extent): * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< * dst_data, dst_strides + 1, * src_shape + 1, dst_shape + 1, */ _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); /* "View.MemoryView":1163 * src_shape + 1, dst_shape + 1, * ndim - 1, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1164 * ndim - 1, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L3:; /* "View.MemoryView":1136 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ /* function exit code */ } /* "View.MemoryView":1166 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { /* "View.MemoryView":1169 * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< * src.shape, dst.shape, ndim, itemsize) * */ _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1166 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ /* function exit code */ } /* "View.MemoryView":1173 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_size; Py_ssize_t __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1176 * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i * cdef Py_ssize_t size = src.memview.view.itemsize # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_size = __pyx_t_1; /* "View.MemoryView":1178 * cdef Py_ssize_t size = src.memview.view.itemsize * * for i in range(ndim): # <<<<<<<<<<<<<< * size *= src.shape[i] * */ __pyx_t_2 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1179 * * for i in range(ndim): * size *= src.shape[i] # <<<<<<<<<<<<<< * * return size */ __pyx_v_size = (__pyx_v_size * (__pyx_v_src->shape[__pyx_v_i])); } /* "View.MemoryView":1181 * size *= src.shape[i] * * return size # <<<<<<<<<<<<<< * * 
@cname('__pyx_fill_contig_strides_array') */ __pyx_r = __pyx_v_size; goto __pyx_L0; /* "View.MemoryView":1173 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1184 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { int __pyx_v_idx; Py_ssize_t __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1193 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ __pyx_t_1 = ((__pyx_v_order == 'F') != 0); if (__pyx_t_1) { /* "View.MemoryView":1194 * * if order == 'F': * for idx in range(ndim): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ __pyx_t_2 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_idx = __pyx_t_4; /* "View.MemoryView":1195 * if order == 'F': * for idx in range(ndim): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * else: */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1196 * for idx in range(ndim): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * else: * for idx in range(ndim - 1, -1, -1): */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } /* "View.MemoryView":1193 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ goto __pyx_L3; } /* "View.MemoryView":1198 * stride = stride * shape[idx] * else: * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ /*else*/ { for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { __pyx_v_idx = __pyx_t_2; /* "View.MemoryView":1199 * else: * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1200 * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * * return stride */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } } __pyx_L3:; /* "View.MemoryView":1202 * stride = stride * shape[idx] * * return stride # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_data_to_temp') */ __pyx_r = __pyx_v_stride; goto __pyx_L0; /* "View.MemoryView":1184 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1205 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { int __pyx_v_i; void *__pyx_v_result; size_t 
__pyx_v_itemsize; size_t __pyx_v_size; void *__pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; struct __pyx_memoryview_obj *__pyx_t_4; int __pyx_t_5; int __pyx_t_6; /* "View.MemoryView":1216 * cdef void *result * * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef size_t size = slice_get_size(src, ndim) * */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1217 * * cdef size_t itemsize = src.memview.view.itemsize * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< * * result = malloc(size) */ __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); /* "View.MemoryView":1219 * cdef size_t size = slice_get_size(src, ndim) * * result = malloc(size) # <<<<<<<<<<<<<< * if not result: * _err(MemoryError, NULL) */ __pyx_v_result = malloc(__pyx_v_size); /* "View.MemoryView":1220 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1221 * result = malloc(size) * if not result: * _err(MemoryError, NULL) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1221, __pyx_L1_error) /* "View.MemoryView":1220 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ } /* "View.MemoryView":1224 * * * tmpslice.data = <char *> result # <<<<<<<<<<<<<< * tmpslice.memview = src.memview * for i in range(ndim): */ __pyx_v_tmpslice->data = ((char *)__pyx_v_result); /* "View.MemoryView":1225 * * tmpslice.data = <char *> result * tmpslice.memview = src.memview # <<<<<<<<<<<<<< * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] */ __pyx_t_4 = __pyx_v_src->memview; __pyx_v_tmpslice->memview = __pyx_t_4; /* "View.MemoryView":1226 * tmpslice.data = <char *> result * tmpslice.memview = src.memview * for i in range(ndim): # <<<<<<<<<<<<<< * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1227 * tmpslice.memview = src.memview * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< * tmpslice.suboffsets[i] = -1 * */ (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); /* "View.MemoryView":1228 * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, */ (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1230 * tmpslice.suboffsets[i] = -1 * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< * ndim, order) * */ (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order)); /* "View.MemoryView":1234 * * * for i in range(ndim): # <<<<<<<<<<<<<< * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1235 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1236 * for i in 
range(ndim): * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< * * if slice_is_contig(src[0], order, ndim): */ (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; /* "View.MemoryView":1235 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ } } /* "View.MemoryView":1238 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1239 * * if slice_is_contig(src[0], order, ndim): * memcpy(result, src.data, size) # <<<<<<<<<<<<<< * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) */ (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size)); /* "View.MemoryView":1238 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ goto __pyx_L9; } /* "View.MemoryView":1241 * memcpy(result, src.data, size) * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< * * return result */ /*else*/ { copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); } __pyx_L9:; /* "View.MemoryView":1243 * copy_strided_to_strided(src, tmpslice, ndim, itemsize) * * return result # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":1205 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = NULL; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1248 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_extents", 0); /* "View.MemoryView":1251 * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % * (i, extent1, extent2)) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err_dim') */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); 
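/* Editor's note (descriptive, not emitted by Cython): this helper and the
 * _err_dim / _err variants that follow are the error routines declared
 * "except -1 with gil" in the .pyx source.  Each one re-acquires the GIL via
 * PyGILState_Ensure, builds and raises the Python exception, and returns -1
 * so that nogil callers such as memoryview_copy_contents can propagate the
 * failure through __PYX_ERR.
 */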
__Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; /* "View.MemoryView":1250 * cdef int _err_extents(int i, Py_ssize_t extent1, * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< * (i, extent1, extent2)) * */ __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1250, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1250, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 1250, __pyx_L1_error) /* "View.MemoryView":1248 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1254 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_dim", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1255 * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err') */ __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1255, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1255, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1255, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_INCREF(__pyx_v_error); __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? 
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1255, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 1255, __pyx_L1_error) /* "View.MemoryView":1254 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1258 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1259 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":1260 * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< * else: * raise error */ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1260, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_error); __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_2 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1260, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 1260, __pyx_L1_error) /* "View.MemoryView":1259 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ } /* "View.MemoryView":1262 * raise error(msg.decode('ascii')) * else: * raise error # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_contents') */ /*else*/ { __Pyx_Raise(__pyx_v_error, 0, 0, 0); __PYX_ERR(1, 1262, __pyx_L1_error) } /* "View.MemoryView":1258 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1265 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { void *__pyx_v_tmpdata; size_t __pyx_v_itemsize; int __pyx_v_i; char __pyx_v_order; int __pyx_v_broadcasting; int __pyx_v_direct_copy; __Pyx_memviewslice __pyx_v_tmp; int __pyx_v_ndim; int __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; void *__pyx_t_7; int __pyx_t_8; /* "View.MemoryView":1273 * Check for overlapping memory and verify the shapes. 
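 * (editor's note, not part of the quoted .pyx source: the copy runs in three
 *  stages - broadcast_leading pads the lower-dimensional operand, the source
 *  is copied into a temporary contiguous buffer via copy_data_to_temp when
 *  the slices overlap in memory, and the data is then moved either with a
 *  single memcpy when both slices share a C- or F-contiguous layout or
 *  element-wise with copy_strided_to_strided, with refcount_copying wrapped
 *  around the transfer for object dtypes.)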
* """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1274 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1276 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1277 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1278 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1281 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1282 * * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); /* "View.MemoryView":1281 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ goto __pyx_L3; } /* "View.MemoryView":1283 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1284 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); /* "View.MemoryView":1283 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ } __pyx_L3:; /* "View.MemoryView":1286 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1288 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_5; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1289 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) 
!= 0); if (__pyx_t_2) { /* "View.MemoryView":1290 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1291 * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # <<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ __pyx_v_broadcasting = 1; /* "View.MemoryView":1292 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; /* "View.MemoryView":1290 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ goto __pyx_L7; } /* "View.MemoryView":1294 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if src.suboffsets[i] >= 0: */ /*else*/ { __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1294, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":1289 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ } /* "View.MemoryView":1296 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1297 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error) /* "View.MemoryView":1296 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ } } /* "View.MemoryView":1299 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1301 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1302 * * if not slice_is_contig(src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); /* "View.MemoryView":1301 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ } /* "View.MemoryView":1304 * order = get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< * src = tmp * */ __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) 
__PYX_ERR(1, 1304, __pyx_L1_error) __pyx_v_tmpdata = __pyx_t_7; /* "View.MemoryView":1305 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp # <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; /* "View.MemoryView":1299 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ } /* "View.MemoryView":1307 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1310 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1311 * * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); /* "View.MemoryView":1310 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ goto __pyx_L12; } /* "View.MemoryView":1312 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1313 * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); /* "View.MemoryView":1312 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ } __pyx_L12:; /* "View.MemoryView":1315 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1317 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1318 * * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); /* "View.MemoryView":1319 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1320 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, 
dtype_is_object, ndim, True) * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1321 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1315 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ } /* "View.MemoryView":1307 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1323 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_8 = (__pyx_t_2 != 0); if (__pyx_t_8) { /* "View.MemoryView":1326 * * * transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1326, __pyx_L1_error) /* "View.MemoryView":1327 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1327, __pyx_L1_error) /* "View.MemoryView":1323 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1329 * transpose_memslice(&dst) * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1330 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1331 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1333 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1334 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1265 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1337 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ static void 
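/* Editor's note (descriptive, not emitted by Cython): broadcast_leading below
 * shifts the existing shape/strides/suboffsets of *mslice up by
 * offset = ndim_other - ndim and fills the new leading dimensions with
 * extent 1, a copy of the original first stride, and suboffset -1, so that
 * e.g. a 1-D slice of shape (5,) lines up against a 2-D destination as (1, 5).
 */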
__pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { int __pyx_v_i; int __pyx_v_offset; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1341 * int ndim_other) nogil: * cdef int i * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); /* "View.MemoryView":1343 * cdef int offset = ndim_other - ndim * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1344 * * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] */ (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); /* "View.MemoryView":1345 * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * */ (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1346 * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< * * for i in range(offset): */ (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); } /* "View.MemoryView":1348 * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] */ __pyx_t_1 = __pyx_v_offset; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1349 * * for i in range(offset): * mslice.shape[i] = 1 # <<<<<<<<<<<<<< * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 */ (__pyx_v_mslice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1350 * for i in range(offset): * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< * mslice.suboffsets[i] = -1 * */ (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); /* "View.MemoryView":1351 * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1337 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ } /* "View.MemoryView":1359 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { int __pyx_t_1; /* "View.MemoryView":1363 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1364 * * if dtype_is_object: * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # 
<<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1363 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ } /* "View.MemoryView":1359 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ } /* "View.MemoryView":1368 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_RefNannyDeclarations #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); /* "View.MemoryView":1371 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1368 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } /* "View.MemoryView":1374 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); /* "View.MemoryView":1378 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1379 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ __pyx_t_4 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_4) { /* "View.MemoryView":1380 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ __pyx_t_4 = (__pyx_v_inc != 0); if (__pyx_t_4) { /* "View.MemoryView":1381 * if ndim == 1: * if inc: * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF((<PyObject **> data)[0]) */ Py_INCREF((((PyObject **)__pyx_v_data)[0])); /* "View.MemoryView":1380 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ goto __pyx_L6; } /* "View.MemoryView":1383 * 
Py_INCREF((<PyObject **> data)[0]) * else: * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ /*else*/ { Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; /* "View.MemoryView":1379 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ goto __pyx_L5; } /* "View.MemoryView":1385 * Py_DECREF((<PyObject **> data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ /*else*/ { /* "View.MemoryView":1386 * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, * ndim - 1, inc) # <<<<<<<<<<<<<< * * data += strides[0] */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } __pyx_L5:; /* "View.MemoryView":1388 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1374 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1394 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { /* "View.MemoryView":1397 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1398 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1400 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1394 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ } /* "View.MemoryView":1404 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_extent; int __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t 
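/* Editor's note (illustrative only, not emitted by Cython):
 * _slice_assign_scalar below fills every element of an n-D slice with the
 * same item by recursing on the leading dimension; the base case walks a
 * 1-D run and copies itemsize bytes at each step:
 *
 *     if (ndim == 1)
 *         for (i = 0; i < extent; i++, data += stride)
 *             memcpy(data, item, itemsize);
 *     else
 *         for (i = 0; i < extent; i++, data += stride)
 *             _slice_assign_scalar(data, shape + 1, strides + 1,
 *                                  ndim - 1, itemsize, item);
 */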
__pyx_t_3; Py_ssize_t __pyx_t_4; /* "View.MemoryView":1408 * size_t itemsize, void *item) nogil: * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t extent = shape[0] * */ __pyx_v_stride = (__pyx_v_strides[0]); /* "View.MemoryView":1409 * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_extent = (__pyx_v_shape[0]); /* "View.MemoryView":1411 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1412 * * if ndim == 1: * for i in range(extent): # <<<<<<<<<<<<<< * memcpy(data, item, itemsize) * data += stride */ __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1413 * if ndim == 1: * for i in range(extent): * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< * data += stride * else: */ (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize)); /* "View.MemoryView":1414 * for i in range(extent): * memcpy(data, item, itemsize) * data += stride # <<<<<<<<<<<<<< * else: * for i in range(extent): */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } /* "View.MemoryView":1411 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ goto __pyx_L3; } /* "View.MemoryView":1416 * data += stride * else: * for i in range(extent): # <<<<<<<<<<<<<< * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) */ /*else*/ { __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1417 * else: * for i in range(extent): * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, itemsize, item) * data += stride */ __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1419 * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) * data += stride # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } } __pyx_L3:; /* "View.MemoryView":1404 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /* function exit code */ } /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v___pyx_type = 0; long __pyx_v___pyx_checksum; PyObject *__pyx_v___pyx_state = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 
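/* Editor's note (descriptive, not emitted by Cython): __pyx_unpickle_Enum and
 * __pyx_unpickle_Enum__set_state below implement pickle support for the Enum
 * sentinel objects; 0xb068931 is a layout checksum generated by Cython, and a
 * mismatch raises pickle.PickleError instead of silently unpickling an
 * incompatible object.
 */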
0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v___pyx_type = values[0]; __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) __pyx_v___pyx_state = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_v___pyx_PickleError = 0; PyObject *__pyx_v___pyx_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0); if (__pyx_t_1) { /* "(tree fragment)":5 * cdef object __pyx_result * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 
= (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_PickleError); __Pyx_GIVEREF(__pyx_n_s_PickleError); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_2); __pyx_v___pyx_PickleError = __pyx_t_2; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":6 * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: */ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_INCREF(__pyx_v___pyx_PickleError); __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 6, __pyx_L1_error) /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ } /* "(tree fragment)":7 * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v___pyx_result = __pyx_t_3; __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ __pyx_t_1 = (__pyx_v___pyx_state != Py_None); __pyx_t_6 = (__pyx_t_1 != 0); if (__pyx_t_6) { /* "(tree fragment)":9 * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<< * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error) __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ } /* "(tree fragment)":10 * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result # <<<<<<<<<<<<<< * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v___pyx_result); __pyx_r = __pyx_v___pyx_result; goto __pyx_L0; /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v___pyx_PickleError); __Pyx_XDECREF(__pyx_v___pyx_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); /* "(tree 
fragment)":12 * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 12, __pyx_L1_error) } __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v___pyx_result->name); __Pyx_DECREF(__pyx_v___pyx_result->name); __pyx_v___pyx_result->name = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 13, __pyx_L1_error) } __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error) __pyx_t_4 = ((__pyx_t_3 > 1) != 0); if (__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L4_bool_binop_done; } __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error) __pyx_t_5 = (__pyx_t_4 != 0); __pyx_t_2 = __pyx_t_5; __pyx_L4_bool_binop_done:; if (__pyx_t_2) { /* "(tree fragment)":14 * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 14, __pyx_L1_error) } __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } __pyx_t_1 = (__pyx_t_8) ? 
__Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static struct __pyx_vtabstruct_array __pyx_vtable_array; static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_array_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_array_obj *)o); p->__pyx_vtab = __pyx_vtabptr_array; p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_array(PyObject *o) { struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_array___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->mode); Py_CLEAR(p->_format); (*Py_TYPE(o)->tp_free)(o); } static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_array___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n); if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); v = __pyx_array___getattr__(o, n); } return v; } static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); } static PyMethodDef __pyx_methods_array[] = { {"__getattr__", 
(PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { __pyx_array___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { __pyx_array___len__, /*mp_length*/ __pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_array_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) "quantas.utils.physics.thermodynamics.array", /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif }; static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, 
visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyMethodDef __pyx_methods_Enum[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) "quantas.utils.physics.thermodynamics.Enum", /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_Enum, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryview___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct 
__pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); } static PyMethodDef __pyx_methods_memoryview[] = { {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, {(char *)"shape", 
__pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ __pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_memoryview_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) "quantas.utils.physics.thermodynamics.memoryview", /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif }; static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject 
*o) { struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryviewslice___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { PyObject* tmp; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; __pyx_tp_clear_memoryview(o); tmp = ((PyObject*)p->from_object); p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); __PYX_XDEC_MEMVIEW(&p->from_slice, 1); return 0; } static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); } static PyMethodDef __pyx_methods__memoryviewslice[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_memoryviewslice = { PyVarObject_HEAD_INIT(0, 0) "quantas.utils.physics.thermodynamics._memoryviewslice", /*tp_name*/ sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___repr__, /*tp_repr*/ #else 0, /*tp_repr*/ #endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ "Internal class for passing memoryview slices to Python", /*tp_doc*/ __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ __pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif }; static 
PyMethodDef __pyx_methods[] = { {"enthalpy", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7quantas_5utils_7physics_14thermodynamics_1enthalpy, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7quantas_5utils_7physics_14thermodynamics_enthalpy}, {"gibbs", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7quantas_5utils_7physics_14thermodynamics_3gibbs, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7quantas_5utils_7physics_14thermodynamics_2gibbs}, {"adiabatic_bulk_modulus", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7quantas_5utils_7physics_14thermodynamics_5adiabatic_bulk_modulus, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7quantas_5utils_7physics_14thermodynamics_4adiabatic_bulk_modulus}, {"gruneisen_parameter", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7quantas_5utils_7physics_14thermodynamics_7gruneisen_parameter, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7quantas_5utils_7physics_14thermodynamics_6gruneisen_parameter}, {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_thermodynamics(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_thermodynamics}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "thermodynamics", 0, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define CYTHON_SMALL_CODE __attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, {&__pyx_n_s_Avogadro, __pyx_k_Avogadro, sizeof(__pyx_k_Avogadro), 0, 0, 1, 1}, {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, {&__pyx_n_s_Cv, __pyx_k_Cv, sizeof(__pyx_k_Cv), 0, 0, 1, 1}, {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_n_s_F, __pyx_k_F, sizeof(__pyx_k_F), 0, 0, 1, 1}, {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, 
{&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, {&__pyx_n_s_KT, __pyx_k_KT, sizeof(__pyx_k_KT), 0, 0, 1, 1}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_T, __pyx_k_T, sizeof(__pyx_k_T), 0, 0, 1, 1}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_n_s_U, __pyx_k_U, sizeof(__pyx_k_U), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_V, __pyx_k_V, sizeof(__pyx_k_V), 0, 0, 1, 1}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, {&__pyx_n_s__19, __pyx_k__19, sizeof(__pyx_k__19), 0, 0, 1, 1}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_alpha, __pyx_k_alpha, sizeof(__pyx_k_alpha), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_cs, __pyx_k_cs, sizeof(__pyx_k_cs), 0, 0, 1, 1}, {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_float64, __pyx_k_float64, sizeof(__pyx_k_float64), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, 
{&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_p, __pyx_k_p, sizeof(__pyx_k_p), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_pf, __pyx_k_pf, sizeof(__pyx_k_pf), 0, 0, 1, 1}, {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, {&__pyx_n_s_scipy_constants, __pyx_k_scipy_constants, sizeof(__pyx_k_scipy_constants), 0, 0, 1, 1}, {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, 
__pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 54, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error) __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error) __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error) __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 400, __pyx_L1_error) __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 609, __pyx_L1_error) __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 828, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); 
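/* Added note (not part of the Cython-generated output): the tuples cached in
 * __Pyx_InitCachedConstants (__pyx_tuple_, __pyx_tuple__2, ...) each hold the
 * single message argument for a ValueError / MemoryError / TypeError raised by
 * the bundled View.MemoryView support code, so every raise site can reuse a
 * pre-packed tuple instead of building one at run time. */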
__Pyx_GIVEREF(__pyx_tuple__4); /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "View.MemoryView":414 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "View.MemoryView":491 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 491, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "View.MemoryView":516 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 516, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "View.MemoryView":566 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 566, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* "View.MemoryView":573 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __pyx_tuple__12 = PyTuple_New(1); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 573, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_INCREF(__pyx_int_neg_1); 
__Pyx_GIVEREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_tuple__12, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_tuple__12); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__14); __Pyx_GIVEREF(__pyx_tuple__14); /* "View.MemoryView":678 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_slice__15 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__15)) __PYX_ERR(1, 678, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); /* "View.MemoryView":699 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 699, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__16); __Pyx_GIVEREF(__pyx_tuple__16); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__18); __Pyx_GIVEREF(__pyx_tuple__18); /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__20); __Pyx_GIVEREF(__pyx_tuple__20); /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); /* "View.MemoryView":288 * cdef 
generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__22); __Pyx_GIVEREF(__pyx_tuple__22); /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__24); __Pyx_GIVEREF(__pyx_tuple__24); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_tuple__25 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); __pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ static int __Pyx_modinit_global_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); /*--- Global init code ---*/ generic = Py_None; Py_INCREF(Py_None); strided = Py_None; Py_INCREF(Py_None); indirect = Py_None; 
Py_INCREF(Py_None); contiguous = Py_None; Py_INCREF(Py_None); indirect_contiguous = Py_None; Py_INCREF(Py_None); __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); /*--- Variable export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); /*--- Function export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_type_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); /*--- Type init code ---*/ __pyx_vtabptr_array = &__pyx_vtable_array; __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_array.tp_print = 0; #endif if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) __pyx_array_type = &__pyx_type___pyx_array; if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_MemviewEnum.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_memoryview.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, 
__pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) __pyx_memoryview_type = &__pyx_type___pyx_memoryview; __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 961, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_memoryviewslice.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 961, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 961, __pyx_L1_error) __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_type_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); /*--- Type import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); /*--- Variable import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); /*--- Function import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } #if PY_MAJOR_VERSION < 3 #ifdef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC void #else #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #endif #else #ifdef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC PyObject * #else #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #endif #endif #if PY_MAJOR_VERSION < 3 __Pyx_PyMODINIT_FUNC initthermodynamics(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC initthermodynamics(void) #else __Pyx_PyMODINIT_FUNC PyInit_thermodynamics(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC PyInit_thermodynamics(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { #if PY_VERSION_HEX >= 0x030700A1 static PY_INT64_T main_interpreter_id = -1; PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); if (main_interpreter_id == -1) { main_interpreter_id = current_id; return (unlikely(current_id == -1)) ? 
-1 : 0; } else if (unlikely(main_interpreter_id != current_id)) #else static PyInterpreterState *main_interpreter = NULL; PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; if (!main_interpreter) { main_interpreter = current_interpreter; } else if (unlikely(main_interpreter != current_interpreter)) #endif { PyErr_SetString( PyExc_ImportError, "Interpreter change detected - this module can only be loaded into one interpreter per process."); return -1; } return 0; } static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { if (allow_none || value != Py_None) { result = PyDict_SetItemString(moddict, to_name, value); } Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__Pyx_check_single_interpreter()) return NULL; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static CYTHON_SMALL_CODE int __pyx_pymod_exec_thermodynamics(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; double __pyx_t_3; static PyThread_type_lock __pyx_t_4[8]; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m) { if (__pyx_m == __pyx_pyinit_module) return 0; PyErr_SetString(PyExc_RuntimeError, "Module 'thermodynamics' has already been imported. 
Re-initialisation is not supported."); return -1; } #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_thermodynamics(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pxy_PyFrame_Initialize_Offsets __Pxy_PyFrame_Initialize_Offsets(); #endif __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("thermodynamics", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_b); __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_cython_runtime); if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_quantas__utils__physics__thermodynamics) { if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "quantas.utils.physics.thermodynamics")) { if (unlikely(PyDict_SetItemString(modules, "quantas.utils.physics.thermodynamics", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) goto __pyx_L1_error; /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) goto __pyx_L1_error; /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); (void)__Pyx_modinit_function_export_code(); if (unlikely(__Pyx_modinit_type_init_code() != 0)) goto __pyx_L1_error; (void)__Pyx_modinit_type_import_code(); (void)__Pyx_modinit_variable_import_code(); (void)__Pyx_modinit_function_import_code(); /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "quantas/utils/physics/thermodynamics.pyx":14 * from cython.parallel import prange * * import scipy.constants as cs # <<<<<<<<<<<<<< * import numpy as np * */ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_n_s__19); __Pyx_GIVEREF(__pyx_n_s__19); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s__19); __pyx_t_2 = __Pyx_Import(__pyx_n_s_scipy_constants, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_cs, __pyx_t_2) < 0) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "quantas/utils/physics/thermodynamics.pyx":15 * * import scipy.constants as cs * import numpy as np # <<<<<<<<<<<<<< * * cdef double NA = cs.Avogadro */ __pyx_t_2 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_2) < 0) __PYX_ERR(0, 15, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "quantas/utils/physics/thermodynamics.pyx":17 * import numpy as np * * cdef double NA = cs.Avogadro # <<<<<<<<<<<<<< * * @cython.boundscheck(False) */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_cs); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_Avogadro); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_3 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_3 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 17, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_7quantas_5utils_7physics_14thermodynamics_NA = __pyx_t_3; /* "quantas/utils/physics/thermodynamics.pyx":1 * # -*- coding: utf-8 -*- # <<<<<<<<<<<<<< * ############################################################################## * # Copyright (c), Gianfranco 
Ulian and Giovanni Valdre'. # */ __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":209 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":316 * * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ * PyThread_allocate_lock(), 
*/ __pyx_memoryview_thread_locks_used = 0; /* "View.MemoryView":317 * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< * PyThread_allocate_lock(), * PyThread_allocate_lock(), */ __pyx_t_4[0] = PyThread_allocate_lock(); __pyx_t_4[1] = PyThread_allocate_lock(); __pyx_t_4[2] = PyThread_allocate_lock(); __pyx_t_4[3] = PyThread_allocate_lock(); __pyx_t_4[4] = PyThread_allocate_lock(); __pyx_t_4[5] = PyThread_allocate_lock(); __pyx_t_4[6] = PyThread_allocate_lock(); __pyx_t_4[7] = PyThread_allocate_lock(); memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_4, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); /* "View.MemoryView":545 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 545, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 545, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":991 * return self.from_object * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 991, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 991, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init quantas.utils.physics.thermodynamics", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init quantas.utils.physics.thermodynamics"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 
0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* MemviewSliceInit */ static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference) { __Pyx_RefNannyDeclarations int i, retval=-1; Py_buffer *buf = &memview->view; __Pyx_RefNannySetupContext("init_memviewslice", 0); if (memviewslice->memview || memviewslice->data) { PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!"); goto fail; } if (buf->strides) { for (i = 0; i < ndim; i++) { memviewslice->strides[i] = buf->strides[i]; } } else { Py_ssize_t stride = buf->itemsize; for (i = ndim - 1; i >= 0; i--) { memviewslice->strides[i] = stride; stride *= buf->shape[i]; } } for (i = 0; i < ndim; i++) { memviewslice->shape[i] = buf->shape[i]; if (buf->suboffsets) { memviewslice->suboffsets[i] = buf->suboffsets[i]; } else { memviewslice->suboffsets[i] = -1; } } memviewslice->memview = memview; memviewslice->data = (char *)buf->buf; if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { Py_INCREF(memview); } retval = 0; goto no_fail; fail: memviewslice->memview = 0; memviewslice->data = 0; retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } #ifndef Py_NO_RETURN #define Py_NO_RETURN #endif static void __pyx_fatalerror(const char *fmt, ...) 
Py_NO_RETURN { va_list vargs; char msg[200]; #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); va_end(vargs); Py_FatalError(msg); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview || (PyObject *) memview == Py_None) return; if (__pyx_get_slice_count(memview) < 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (first_time) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview ) { return; } else if ((PyObject *) memview == Py_None) { memslice->memview = NULL; return; } if (__pyx_get_slice_count(memview) <= 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = __pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (last_time) { if (have_gil) { Py_CLEAR(memslice->memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); PyGILState_Release(_gilstate); } } else { memslice->memview = NULL; } } /* PyDictVersioning */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { PyObject *dict = Py_TYPE(obj)->tp_dict; return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); #else dictptr = _PyObject_GetDictPtr(obj); #endif } return (dictptr && *dictptr) ? 
__PYX_GET_DICT_VERSION(*dictptr) : 0; } static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { PyObject *dict = Py_TYPE(obj)->tp_dict; if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) return 0; return obj_dict_version == __Pyx_get_object_dict_version(obj); } #endif /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* None */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); } /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { 
PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; 
PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyObjectCall2Args */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { PyObject *args, *result = NULL; #if CYTHON_FAST_PYCALL if (PyFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyFunction_FastCall(function, args, 2); } #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyCFunction_FastCall(function, args, 2); } #endif args = PyTuple_New(2); if (unlikely(!args)) goto done; Py_INCREF(arg1); PyTuple_SET_ITEM(args, 0, arg1); Py_INCREF(arg2); PyTuple_SET_ITEM(args, 1, arg2); Py_INCREF(function); result = __Pyx_PyObject_Call(function, args, NULL); Py_DECREF(args); Py_DECREF(function); done: return result; 
} /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if (likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result; #if CYTHON_USE_UNICODE_INTERNALS Py_hash_t hash1, hash2; hash1 = ((PyBytesObject*)s1)->ob_shash; hash2 = ((PyBytesObject*)s2)->ob_shash; if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { return (equals == Py_NE); } #endif result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } #if CYTHON_USE_UNICODE_INTERNALS { Py_hash_t hash1, hash2; #if CYTHON_PEP393_ENABLED hash1 = ((PyASCIIObject*)s1)->hash; hash2 = ((PyASCIIObject*)s2)->hash; #else hash1 = ((PyUnicodeObject*)s1)->hash; hash2 = ((PyUnicodeObject*)s2)->hash; #endif if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { goto return_ne; } } #endif kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = __Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* None */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { Py_ssize_t q = a / b; Py_ssize_t r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_USE_TYPE_SLOTS #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* GetItemInt */ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyList_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyTuple_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* ObjectGetItem */ #if CYTHON_USE_TYPE_SLOTS static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { PyObject *runerr; Py_ssize_t key_value; PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; if (unlikely(!(m && m->sq_item))) { PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); return NULL; } key_value = __Pyx_PyIndex_AsSsize_t(index); if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); } if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { PyErr_Clear(); PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); } return NULL; } static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; if (likely(m && m->mp_subscript)) { return m->mp_subscript(obj, key); } return __Pyx_PyObject_GetIndex(obj, key); } #endif /* decode_c_string */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { PyErr_SetString(PyExc_OverflowError, "c-string too long to convert to Python"); return NULL; } length = (Py_ssize_t) slen; if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } length = stop - start; if (unlikely(length <= 0)) return PyUnicode_FromUnicode(NULL, 0); cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetAttr3 */ static PyObject *__Pyx_GetAttr3Default(PyObject *d) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; __Pyx_PyErr_Clear(); Py_INCREF(d); return d; } static CYTHON_INLINE PyObject 
*__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { PyObject *r = __Pyx_GetAttr(o, n); return (likely(r)) ? r : __Pyx_GetAttr3Default(d); } /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? "" : "s"); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(__Pyx_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* GetTopmostException */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate) { _PyErr_StackItem *exc_info = tstate->exc_info; while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && exc_info->previous_item != NULL) { exc_info = exc_info->previous_item; } return exc_info; } #endif /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); *type = exc_info->exc_type; *value = exc_info->exc_value; *tb = exc_info->exc_traceback; #else *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; #endif Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = type; exc_info->exc_value = value; exc_info->exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if 
(unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback = local_tb; } #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = *type; exc_info->exc_value = *value; exc_info->exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } 
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { PyObject *t = PyTuple_GET_ITEM(tuple, i); #if PY_MAJOR_VERSION < 3 if (likely(exc_type == t)) return 1; #endif if (likely(PyExceptionClass_Check(t))) { if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; } else { } } return 0; } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { if (likely(PyExceptionClass_Check(exc_type))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } else if (likely(PyTuple_Check(exc_type))) { return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); } else { } } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { assert(PyExceptionClass_Check(exc_type1)); assert(PyExceptionClass_Check(exc_type2)); if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { (void)inplace; (void)zerodivision_check; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if 
(likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla + llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return 
(inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); } #endif /* None */ static CYTHON_INLINE long __Pyx_div_long(long a, long b) { long q = a / b; long r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* WriteUnraisableException */ static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename, int full_traceback, CYTHON_UNUSED int nogil) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; __Pyx_PyThreadState_declare #ifdef WITH_THREAD PyGILState_STATE state; if (nogil) state = PyGILState_Ensure(); #ifdef _MSC_VER else state = (PyGILState_STATE)-1; #endif #endif __Pyx_PyThreadState_assign __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); if (full_traceback) { Py_XINCREF(old_exc); Py_XINCREF(old_val); Py_XINCREF(old_tb); __Pyx_ErrRestore(old_exc, old_val, old_tb); PyErr_PrintEx(1); } #if PY_MAJOR_VERSION < 3 ctx = PyString_FromString(name); #else ctx = PyUnicode_FromString(name); #endif __Pyx_ErrRestore(old_exc, old_val, old_tb); if (!ctx) { PyErr_WriteUnraisable(Py_None); } else { PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } #ifdef WITH_THREAD if (nogil) PyGILState_Release(state); #endif } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* HasAttr */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { PyObject *r; if (unlikely(!__Pyx_PyBaseString_Check(n))) { PyErr_SetString(PyExc_TypeError, "hasattr(): attribute name must be string"); return -1; } r = __Pyx_GetAttr(o, n); if (unlikely(!r)) { PyErr_Clear(); return 0; } else { Py_DECREF(r); return 1; } } /* PyObject_GenericGetAttrNoDict */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { PyErr_Format(PyExc_AttributeError, #if PY_MAJOR_VERSION >= 3 "'%.50s' object has no attribute '%U'", tp->tp_name, attr_name); #else "'%.50s' object has no attribute '%.400s'", tp->tp_name, PyString_AS_STRING(attr_name)); #endif return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { PyObject *descr; PyTypeObject *tp = Py_TYPE(obj); if (unlikely(!PyString_Check(attr_name))) { return PyObject_GenericGetAttr(obj, attr_name); } assert(!tp->tp_dictoffset); descr = _PyType_Lookup(tp, attr_name); if (unlikely(!descr)) { return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); } Py_INCREF(descr); #if PY_MAJOR_VERSION < 3 if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) #endif { descrgetfunc f = Py_TYPE(descr)->tp_descr_get; if (unlikely(f)) { PyObject *res = f(descr, obj, (PyObject *)tp); Py_DECREF(descr); return res; } } return descr; } #endif /* PyObject_GenericGetAttr */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { return PyObject_GenericGetAttr(obj, attr_name); } return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); } #endif /* SetVTable */ static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 
PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } /* SetupReduce */ static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { int ret; PyObject *name_attr; name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); if (likely(name_attr)) { ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); } else { ret = -1; } if (unlikely(ret < 0)) { PyErr_Clear(); ret = 0; } Py_XDECREF(name_attr); return ret; } static int __Pyx_setup_reduce(PyObject* type_obj) { int ret = 0; PyObject *object_reduce = NULL; PyObject *object_reduce_ex = NULL; PyObject *reduce = NULL; PyObject *reduce_ex = NULL; PyObject *reduce_cython = NULL; PyObject *setstate = NULL; PyObject *setstate_cython = NULL; #if CYTHON_USE_PYTYPE_LOOKUP if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto GOOD; #else if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto GOOD; #endif #if CYTHON_USE_PYTYPE_LOOKUP object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD; #else object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD; #endif reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto BAD; if (reduce_ex == object_reduce_ex) { #if CYTHON_USE_PYTYPE_LOOKUP object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD; #else object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD; #endif reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto BAD; if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { reduce_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_cython); if (unlikely(!reduce_cython)) goto BAD; ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto BAD; setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); if (!setstate) PyErr_Clear(); if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { setstate_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate_cython); if (unlikely(!setstate_cython)) goto BAD; ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto BAD; } PyType_Modified((PyTypeObject*)type_obj); } } goto GOOD; BAD: if (!PyErr_Occurred()) PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); ret = -1; GOOD: #if !CYTHON_USE_PYTYPE_LOOKUP Py_XDECREF(object_reduce); Py_XDECREF(object_reduce_ex); #endif Py_XDECREF(reduce); Py_XDECREF(reduce_ex); Py_XDECREF(reduce_cython); Py_XDECREF(setstate); Py_XDECREF(setstate_cython); return ret; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject 
**cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { __PYX_PY_DICT_LOOKUP_IF_MODIFIED( use_cline, *cython_runtime_dict, __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* 
__Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if ((0)) {} view->obj = NULL; Py_DECREF(obj); } #endif /* MemviewSliceIsContig */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) { int i, index, step, start; Py_ssize_t itemsize = mvs.memview->view.itemsize; if (order == 'F') { step = 1; start = 0; } else { step = -1; start = ndim - 1; } for (i = 0; i < ndim; i++) { index = start + step * i; if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) return 0; itemsize *= mvs.shape[index]; } return 1; } /* OverlappingSlices */ static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice, void **out_start, void **out_end, int ndim, size_t itemsize) { char *start, *end; int i; start = end = slice->data; for (i = 0; i < ndim; i++) { Py_ssize_t stride = slice->strides[i]; Py_ssize_t extent = 
slice->shape[i]; if (extent == 0) { *out_start = *out_end = start; return; } else { if (stride > 0) end += stride * (extent - 1); else start += stride * (extent - 1); } } *out_start = start; *out_end = end + itemsize; } static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize) { void *start1, *end1, *start2, *end2; __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); return (start1 < end2) && (start2 < end1); } /* Capsule */ static CYTHON_INLINE PyObject * __pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) { PyObject *cobj; #if PY_VERSION_HEX >= 0x02070000 cobj = PyCapsule_New(p, sig, NULL); #else cobj = PyCObject_FromVoidPtr(p, NULL); #endif return cobj; } /* IsLittleEndian */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) { union { uint32_t u32; uint8_t u8[4]; } S; S.u32 = 0x01020304; return S.u8[0] == 4; } /* BufferFormatCheck */ static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t <= '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 
16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. 
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { 
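/* The format string ended while fields of the expected dtype remain
   unmatched: report a buffer dtype mismatch. */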
__Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } CYTHON_FALLTHROUGH; case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } CYTHON_FALLTHROUGH; case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* TypeInfoCompare */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && !b->fields[i].type; } 
} return 1; } /* MemviewSliceValidateAndInit */ static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (buf->strides[dim] != sizeof(void *)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (buf->strides[dim] != buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (stride < buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } } else { if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (spec & (__Pyx_MEMVIEW_PTR)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (buf->suboffsets) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (buf->suboffsets && buf->suboffsets[dim] >= 0) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", dim); goto fail; } } if (spec & __Pyx_MEMVIEW_PTR) { if (!buf->suboffsets || (buf->suboffsets[dim] < 0)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 1; i >- 1; i--) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous."); goto fail; } stride = stride * buf->shape[i]; } } return 1; fail: return 0; } static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj) { struct __pyx_memoryview_obj *memview, *new_memview; __Pyx_RefNannyDeclarations Py_buffer *buf; int i, spec = 0, retval = -1; __Pyx_BufFmt_Context ctx; int from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if (buf->ndim != ndim) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto fail; } if 
(new_memview) { __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned) buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (!__pyx_check_strides(buf, i, ndim, spec)) goto fail; if (!__pyx_check_suboffsets(buf, i, ndim, spec)) goto fail; } if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* MemviewSliceCopyTemplate */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object) { __Pyx_RefNannyDeclarations int i; __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_memoryview_obj *from_memview = 
from_mvs->memview; Py_buffer *buf = &from_memview->view; PyObject *shape_tuple = NULL; PyObject *temp_int = NULL; struct __pyx_array_obj *array_obj = NULL; struct __pyx_memoryview_obj *memview_obj = NULL; __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); for (i = 0; i < ndim; i++) { if (from_mvs->suboffsets[i] >= 0) { PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " "indirect dimensions (axis %d)", i); goto fail; } } shape_tuple = PyTuple_New(ndim); if (unlikely(!shape_tuple)) { goto fail; } __Pyx_GOTREF(shape_tuple); for(i = 0; i < ndim; i++) { temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); if(unlikely(!temp_int)) { goto fail; } else { PyTuple_SET_ITEM(shape_tuple, i, temp_int); temp_int = NULL; } } array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); if (unlikely(!array_obj)) { goto fail; } __Pyx_GOTREF(array_obj); memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( (PyObject *) array_obj, contig_flag, dtype_is_object, from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } 
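/* Small positive PyLongs whose digit count fit the cases above were converted
   inline from the internal digits array; anything larger falls through to the
   generic PyLong_AsUnsignedLong / PyLong_AsUnsignedLongLong conversion below. */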
#endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if 
(sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result 
== 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { 
__PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if 
CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | 
(char)digits[0]))); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = 
PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). 
" "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
finitediff_c.c
#include <stdlib.h> /* malloc & free */ #include <string.h> /* memset */ #include "finitediff_c.h" #include "newton_interval.h" #ifdef FINITEDIFF_OPENMP #include <omp.h> #else #define omp_get_thread_num() 0 #endif void finitediff_calculate_weights( FINITEDIFF_REAL * const FINITEDIFF_RESTRICT w, const int ldw, const FINITEDIFF_REAL * const FINITEDIFF_RESTRICT grid, const int len_g, const int max_deriv, const FINITEDIFF_REAL around ) { int i, j, k, mn; FINITEDIFF_REAL c1, c2, c2_r, c3, c3_r, c4, c5; c1 = 1; c4 = grid[0] - around; memset(w, 0, sizeof(FINITEDIFF_REAL)*ldw*(max_deriv+1)); w[0] = 1; for (i = 1; i < len_g; ++i){ mn = FINITEDIFF_MIN(i, max_deriv); c2 = 1; c5 = c4; c4 = grid[i] - around; for (j = 0; j < i; ++j){ c3 = grid[i] - grid[j]; c3_r = 1/c3; c2 = c2*c3; if (j == i-1){ c2_r = 1/c2; for (k = mn; k >= 1; --k){ w[i + k*ldw] = c1*(k*w[i - 1 + (k-1)*ldw] - c5*w[i - 1 + k*ldw])*c2_r; } w[i] = -c1*c5*w[i-1]*c2_r; } for (k = mn; k >= 1; --k){ w[j + k*ldw] = (c4*w[j + k*ldw] - k*w[j + (k-1)*ldw])*c3_r; } w[j] = c4*w[j]*c3_r; } c1 = c2; } } void finitediff_apply_fd( FINITEDIFF_REAL * const FINITEDIFF_RESTRICT out, const int ld_out, FINITEDIFF_REAL * const FINITEDIFF_RESTRICT w, const int ldw, const int nsets, const int max_deriv, const int len_grid, const FINITEDIFF_REAL * const FINITEDIFF_RESTRICT ydata, const int ldy ) { int i, j, k; FINITEDIFF_REAL tmp; for (i=0; i<nsets; ++i){ for (j=0; j <= max_deriv; ++j){ tmp = 0; for (k=0; k<len_grid; ++k){ tmp += w[k + j*ldw] * ydata[ldy*i + k]; } out[i*ld_out + j] = tmp; } } } int finitediff_calc_and_apply_fd( FINITEDIFF_REAL * const FINITEDIFF_RESTRICT out, const int ld_out, const int nsets, const int max_deriv, const int len_grid, const FINITEDIFF_REAL * const FINITEDIFF_RESTRICT grid, const FINITEDIFF_REAL * const FINITEDIFF_RESTRICT ydata, const int ldy, const FINITEDIFF_REAL xtgt ) { int status = FINITEDIFF_STATUS_SUCCESS; FINITEDIFF_REAL * w; const int ldw=len_grid; w = (FINITEDIFF_REAL *)malloc(sizeof(FINITEDIFF_REAL)*ldw*(max_deriv+1)); if (!w) { status = FINITEDIFF_STATUS_ERR_BAD_ALLOC; goto exit0; } if (len_grid < max_deriv + 1){ status = FINITEDIFF_STATUS_ERR_TOO_SMALL_GRID; goto exit1; } if (ld_out < max_deriv + 1) { status = FINITEDIFF_STATUS_ERR_WRONG_LEADING_DIMENSION; goto exit1; } finitediff_calculate_weights(w, ldw, grid, len_grid, max_deriv, xtgt); finitediff_apply_fd(out, ld_out, w, ldw, nsets, max_deriv, len_grid, ydata, ldy); exit1: free(w); exit0: return status; } int finitediff_interpolate_by_finite_diff( FINITEDIFF_REAL * const FINITEDIFF_RESTRICT out, /* C-order: out[tgt_idx, set_idx, deriv_idx] */ const int len_targets, const int nsets, const int max_deriv, const int elem_strides_out_0, /* tgt_idx * elem_strides_out_0 */ const int elem_strides_out_1, /* set_idx * elem_strides_out_1, strides_out_2 assumed to be 1 */ const int ntail, const int nhead, const FINITEDIFF_REAL * const FINITEDIFF_RESTRICT grid, const int len_grid, const FINITEDIFF_REAL * const FINITEDIFF_RESTRICT ydata, /* C-order: ydata[set_idx, grid_idx] */ const int ldy, const FINITEDIFF_REAL * const FINITEDIFF_RESTRICT xtgts /* len(xtgts) == len_targets */ ) { FINITEDIFF_REAL xtgt; int tgt_idx, j=0, status=0, n_threads=1; const int nin = nhead + ntail; FINITEDIFF_REAL *w, *wp; const int elem_strides_w_1 = FINITEDIFF_MIN(len_grid, nin); #ifndef FINITEDIFF_OPENMP const int elem_strides_w_0 = elem_strides_w_1*(max_deriv+1); #else const int elem_strides_w_0 = FINITEDIFF_ROUND_L1(elem_strides_w_1*(max_deriv+1)); char * num_threads_var; num_threads_var = 
getenv("FINITEDIFF_NUM_THREADS"); if (num_threads_var) { n_threads = atoi(num_threads_var); if (!n_threads) { status = FINITEDIFF_STATUS_ERR_ILLEGAL_ENV_VAR; goto exit0; } } else { n_threads = omp_get_num_threads(); } #endif if (len_grid < max_deriv + 1){ status = FINITEDIFF_STATUS_ERR_TOO_SMALL_GRID; goto exit0; } if (nin < max_deriv + 1){ status = FINITEDIFF_STATUS_ERR_TOO_FEW_POINTS; goto exit0; } w = (FINITEDIFF_REAL *)malloc(sizeof(FINITEDIFF_REAL)*elem_strides_w_0*n_threads); if (!w) { status = FINITEDIFF_STATUS_ERR_BAD_ALLOC; goto exit0; } #ifdef FINITEDIFF_OPENMP #pragma omp parallel for private(xtgt, wp) firstprivate(j) schedule(static) num_threads(n_threads) #endif for (tgt_idx=0; tgt_idx<len_targets; ++tgt_idx) { xtgt = xtgts[tgt_idx]; j = get_interval_from_guess(grid, len_grid, xtgt, j) - nhead; j = FINITEDIFF_MAX(0, FINITEDIFF_MIN(j, len_grid - nin)); wp = w + omp_get_thread_num()*elem_strides_w_0; finitediff_calculate_weights(wp, elem_strides_w_1, grid+j, elem_strides_w_1, max_deriv, xtgt); finitediff_apply_fd(out + tgt_idx*elem_strides_out_0, elem_strides_out_1, wp, elem_strides_w_1, nsets, max_deriv, elem_strides_w_1, ydata + j, ldy); } free(w); exit0: return status; }
GJ.c
#include "GJ.h" /* --------------------------------------------- IMPLEMENTATIONS -------------------------------------------------- */ /* /* * Dada uma matrix e o id do processo, essa função irá dividir as linhas responsáveis pelo processo pelo valor de seus * respectivos pivots, o que irá fazer que sua diagonal seja igual a um. */ void pivoting (const int world_rank, const int world_size, int **matrix, const size_t matrix_size) { size_t matrix_col = matrix_size+1, i = 0, j = 0; size_t chunk = matrix_size/world_size, limit = 0, pivot = 0; if (NULL != matrix) { /* Calcula até qual linha o processo designado será responsável por pivotá-la. */ limit = (world_rank+1)*chunk; /* Cada processo fica reponsável pela sua quantidade de linhas apenas para pivotamento. */ #pragma omp parallel for for (i = world_rank*chunk; i < limit; i++) { pivot = matrix[i][i]; /* Caso o pivot seja zero, o sistema no final poderá ser do tipo possível, todavia, indeterminado. */ if (0 != pivot) { /* Como há interdependência dos dados nesse loop, se pode paralelizar a tarefa de dividir a linha pelo pivot sem maiores preocupações com dependência dos valores. */ #pragma omp parallel for for (j = 0; j < matrix_col; j++) { matrix[i][j] /= pivot; } } } } } /* * Transforma um array 2d em um array 1d. */ static void matrix_to_vector (int **matrix, int *vector, const size_t matrix_size) { size_t matrix_col = matrix_size+1, matrix_line = matrix_size, i = 0, j = 0, k = 0; if (NULL != matrix && NULL != vector) { for (; i < matrix_line; i++) { for (j = 0; j < matrix_col; j++) { vector[k++] = matrix[i][j]; } } } } /* * Transforma um array 1d em um array 2d. */ static void vector_to_matrix (int *vector, int **matrix, const size_t matrix_size) { size_t matrix_col = matrix_size+1, matrix_line = matrix_size, i = 0, j = 0, k = 0; if (NULL != matrix && NULL != vector) { for (; i < matrix_line; i++) { for (j = 0; j < matrix_col; j++) { matrix[i][j] = vector[k++]; } } } } /* * Junta a matrix que possui os pivotamentos anteriormente realizados com o atual. */ static void merge_pivoting (const int world_rank, const int world_size, int ** matrix, int *vector, const size_t matrix_size) { size_t chunk = matrix_size/world_size, limit = 0; size_t matrix_col = matrix_size+1, i = 0, j = 0, k = 0; if (NULL != matrix && NULL != vector) { limit = (world_rank+1)*chunk; k = (world_rank*chunk)*matrix_col; for (i = world_rank*chunk; i < limit; i++) { for (j = 0; j < matrix_col; j++) { vector[k++] = matrix[i][j]; } } } } /* * Junta todos os pivotamentos realizados om o da matrix que o processo responsável pivotou, em uma estrutura de anel. * Cada processo fica responsável por pivotar um número de linhas de maneira crescente ao seu ID. */ void merge_matrix (const int world_rank, const int world_size, int **matrix, const size_t matrix_size) { size_t matrix_col = matrix_size+1, matrix_line = matrix_size; int *vector = (int *) malloc(sizeof(int) * (matrix_line*matrix_col)); /* Uma estrutura de anel para passar as linhas do processo anterior que já foram pivotadas com a do processo atual e passar para o próximo processo. */ if (is_root(world_rank)) { matrix_to_vector(matrix, vector, matrix_size); MPI_Send(vector, matrix_line*matrix_col, MPI_INT, (world_rank+1)%world_size, 0, MPI_COMM_WORLD); } else if (!is_tail(world_rank, world_size)) { MPI_Recv(vector, matrix_line*matrix_col, MPI_INT, world_rank-1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); /* Juntar a matrix com as linhas pivotadas até este processo com as pivotadas por este processo. 
*/ merge_pivoting(world_rank, world_size, matrix, vector, matrix_size); MPI_Send(vector, matrix_line*matrix_col, MPI_INT, (world_rank+1)%world_size, 0, MPI_COMM_WORLD); } else { /* Quando o processo for o tail, ele apenas juntará toda a informação em uma matriz final que será utilizada posteriormente para zerar as colunas. */ MPI_Recv(vector, matrix_line*matrix_col, MPI_INT, world_rank-1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); merge_pivoting(world_rank, world_size, matrix, vector, matrix_size); vector_to_matrix(vector, matrix, matrix_size); } free(vector); } /* * Dada a matriz já pivotada, se zera as colunas desses pivots. */ void clear_columns (int **matrix, const size_t matrix_size) { size_t matrix_col = matrix_size+1, matrix_line = matrix_size, i = 0, j = 0, k = 0, pivot = 0; float factor = 0; if (NULL != matrix) { /* Uma linha de cada vez da matrix será selecionada para zerar a coluna do seu pivot nas outras linhas. */ for (; i < matrix_line; i++) { pivot = matrix[i][i]; /* O pivot será zero quando alguma chamada anterior acabou por zerar a sua posição. */ if (0 != pivot) { for (j = 0; j < matrix_line; j++) { /* Não faz sentido procurar zerar a coluna na linha do prórprio pivot. */ if (i != j) { factor = matrix[j][i]/pivot; /* Na linha que se busca zerar a coluna, subtrair a linha do pivot. */ #pragma omp parallel for for (k = 0; k < matrix_col; k++) { matrix[j][k] -= factor*matrix[i][k]; } } } } } } }
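A single-process usage sketch of pivoting() and clear_columns() above (with a world size of 1 the MPI ring in merge_matrix is not needed); it assumes GJ.h declares these functions and pulls in its own dependencies, and the small integer system is made up so the elimination stays exact:

#include <stdio.h>
#include <stdlib.h>
#include "GJ.h"

int main(void)
{
    /* 2x2 augmented system: 2x + 2y = 6, 2y = 4  ->  x = 1, y = 2 */
    const size_t n = 2;
    int row0[] = {2, 2, 6}, row1[] = {0, 2, 4};
    int **m = malloc(n * sizeof *m);
    if (!m) return 1;
    m[0] = row0;
    m[1] = row1;
    pivoting(0, 1, m, n);       /* rank 0 of a single process owns all rows */
    clear_columns(m, n);
    printf("x = %d, y = %d\n", m[0][2], m[1][2]);
    free(m);
    return 0;
}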
main.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <time.h> #include "omp.h" #include "functions.h" int main (int argc, char **argv) { int Nthreads = atoi(argv[1]); omp_set_num_threads(Nthreads); //seed value for the randomizer double seed = clock(); //this will make your program run differently everytime //double seed = 0; //uncomment this and your program will behave the same everytime it's run srand(seed); //declare storage for an ElGamal cryptosytem unsigned int p, g, h, x; //begin with rank 0 getting user's input unsigned int n; printf("Enter a number of bits: "); fflush(stdout); char status = scanf("%u",&n); //make sure the input makes sense if ((n<9)||(n>31)) {//Updated bounds. 8 is no good (need to encode chars) printf("Unsupported bit size.\n"); return 0; } printf("\n"); //NOTE: supressed all outputs except for Nthreads, time, and throughput, for ease of making graphs //n = 20; //printf("Entering %u bits (manual override)\n", n); //setup an ElGamal cryptosystem setupElGamal(n,&p,&g,&h,&x); int bufferSize = 1024; unsigned char *message = (unsigned char *) malloc(bufferSize*sizeof(unsigned char)); //populate the string with a message strcpy(message, "Hello, this is the message as a string."); //strcpy(message, "four"); printf("Message = \"%s\"\n", message); /* Q1.1 Finish this line */ unsigned int charsPerInt = (n-1)/8; padString(message, charsPerInt); printf("Padded Message = \"%s\"\n", message); unsigned int Nchars = strlen(message); unsigned int Nints = strlen(message)/charsPerInt; //storage for message as elements of Z_p unsigned int *Zmessage = (unsigned int *) malloc(Nints*sizeof(unsigned int)); //storage for extra encryption coefficient unsigned int *a = (unsigned int *) malloc(Nints*sizeof(unsigned int)); // cast the string into an unsigned int array convertStringToZ(message, Nchars, Zmessage, Nints); //Encrypt the Zmessage with the ElGamal cyrptographic system ElGamalEncrypt(Zmessage,a,Nints,p,g,h); printf("The encrypted text is: [ "); for (unsigned int i=0;i<Nints;i++) { printf("(%u,%u) ", Zmessage[i], a[i]); } printf("]\n"); //Converting to Ciphertext *Bonus* ------------------------------------------------------------------------- printf("\n*BONUS* Ciphertext: "); unsigned int lpi = 0; //letters (mod 26) per integer while(pow(26,lpi) < p){ lpi++; } //printf("LPI: %u\n", lpi); unsigned int* cs = malloc(lpi*sizeof(unsigned int)); unsigned int* cs2 = malloc(lpi*sizeof(unsigned int)); for(unsigned int i = 0; i < Nints; i++){ unsigned int temp = Zmessage[i]; unsigned int temp2 = a[i]; for(int j = lpi-1; j >= 0; j--){ cs[j] = (temp % 26) + 65; cs2[j] = (temp2 % 26) + 65; temp = temp / 26; temp2 = temp2 / 26; } for(int j = 0; j < lpi; j++) printf("%c", (unsigned char)cs[j]); printf(" "); for(int j = 0; j < lpi; j++) printf("%c", (unsigned char)cs2[j]); printf(" "); } free(cs); free(cs2); printf("\n\n"); //End Bonus ------------------------------------------------------------------------------------------------ //Decrypt the Zmessage with the ElGamal cyrptographic system ElGamalDecrypt(Zmessage,a,Nints,p,x); convertZToString(Zmessage, Nints, message, Nchars); printf("Decrypted Message = \"%s\"\n", message); printf("\n"); //Suppose we don't know the secret key. 
Use OpenMP threads to try and find it in parallel //printf("Using %d OpenMP threads to find the secret key...\n", Nthreads); /* Q2.3 Parallelize this loop with OpenMP */ double startTime = omp_get_wtime(); int found = 0; #pragma omp parallel for shared(found) for (unsigned int i=0; i < p-1; i++) { if(found == 1){ continue; } if (modExp(g,i+1,p)==h) { printf("Secret key found! x = %u \n", i+1); found = 1; } } double endTime = omp_get_wtime(); double totalTime = endTime-startTime; double work = (double) p; double throughput = work/totalTime; printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput); //printf("%u,%g,%g\n", Nthreads, totalTime, throughput); free(message); free(Zmessage); free(a); return 0; }
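A self-contained sketch of the same brute-force key search with an explicit early-exit flag. mod_exp here is a local square-and-multiply helper, not the modExp from functions.h, and p, g and the secret exponent are made-up values; the atomic read/write pair needs OpenMP 3.1 or later:

#include <stdio.h>
#include <omp.h>

/* local square-and-multiply modular exponentiation */
static unsigned int mod_exp(unsigned int b, unsigned int e, unsigned int m)
{
    unsigned long long result = 1, base = b % m;
    while (e > 0) {
        if (e & 1u) result = (result * base) % m;
        base = (base * base) % m;
        e >>= 1;
    }
    return (unsigned int) result;
}

int main(void)
{
    const unsigned int p = 104729, g = 5;        /* made-up prime and base */
    const unsigned int secret = 71234;
    const unsigned int h = mod_exp(g, secret, p);
    unsigned int key = 0;
    int found = 0;
    #pragma omp parallel for shared(found, key)
    for (unsigned int i = 1; i < p; i++) {
        int stop;
        #pragma omp atomic read
        stop = found;                            /* cheap early-exit check */
        if (stop) continue;
        if (mod_exp(g, i, p) == h) {
            #pragma omp critical
            key = i;
            #pragma omp atomic write
            found = 1;
        }
    }
    printf("found exponent x = %u with g^x mod p == h\n", key);
    return 0;
}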
treegrav.c
/****************************************************************************/ /* TREEGRAV.C: routines to compute gravity. Public routines: gravcalc(). */ /* Copyright (c) 2001 by Joshua E. Barnes, Honolulu, Hawai`i. */ /****************************************************************************/ #include "stdinc.h" #include "mathfns.h" #include "vectmath.h" #include "treedefs.h" /* Local routines to perform force calculations. */ local void walktree(nodeptr *, nodeptr *, cellptr, cellptr, nodeptr, real, vector); local bool accept(nodeptr, real, vector); local void walksub(nodeptr *, nodeptr *, cellptr, cellptr, nodeptr, real, vector); local void gravsum(bodyptr, cellptr, cellptr); local void sumnode(cellptr, cellptr, vector, real *, vector); local void sumcell(cellptr, cellptr, vector, real *, vector); /* Lists of active nodes and interactions. */ #if !defined(FACTIVE) # define FACTIVE 0.75 /* active list fudge factor */ #endif local int actlen; /* length as allocated */ local nodeptr *active; /* list of nodes tested */ local cellptr interact; /* list of interactions */ /* * GRAVCALC: perform force calculation on all particles. */ void gravcalc(void) { double cpustart; vector rmid; actlen = FACTIVE * 216 * tdepth; /* estimate list length */ #if !defined(QUICKSCAN) actlen = actlen * rpow(theta, -2.5); /* allow for opening angle */ #endif active = (nodeptr *) allocate(actlen * sizeof(nodeptr)); interact = (cellptr) allocate(actlen * sizeof(cell)); cpustart = cputime(); /* record time, less alloc */ actmax = nbbcalc = nbccalc = 0; /* zero cumulative counters */ active[0] = (nodeptr) root; /* initialize active list */ CLRV(rmid); /* set center of root cell */ walktree(active, active + 1, interact, interact + actlen, (nodeptr) root, rsize, rmid); /* scan tree, update forces */ cpuforce = cputime() - cpustart; /* store CPU time w/o alloc */ free(active); free(interact); } /* * WALKTREE: do a complete walk of the tree, building the interaction * list level-by-level and computing the resulting force on each body. */ local void walktree(nodeptr *aptr, nodeptr *nptr, cellptr cptr, cellptr bptr, nodeptr p, real psize, vector pmid) { nodeptr *np, *ap, q; int actsafe; if (Update(p)) { /* are new forces needed? */ np = nptr; /* start new active list */ actsafe = actlen - NSUB; /* leave room for NSUB more */ #pragma omp parallel for private(ap) for (ap = aptr; ap < nptr; ap++) /* loop over active nodes */ if (Type(*ap) == CELL) { /* is this node a cell? */ if (accept(*ap, psize, pmid)) { /* does it pass the test? 
*/ Mass(cptr) = Mass(*ap); /* copy to interaction list */ SETV(Pos(cptr), Pos(*ap)); SETM(Quad(cptr), Quad(*ap)); cptr++; /* and bump cell array ptr */ } else { /* else it fails the test */ if (np - active >= actsafe) /* check list has room */ error("walktree: active list overflow\n"); for (q = More(*ap); q != Next(*ap); q = Next(q)) /* loop over all subcells */ *np++= q; /* put on new active list */ } } else /* else this node is a body */ if (*ap != p) { /* if not self-interaction */ --bptr; /* bump body array ptr */ Mass(bptr) = Mass(*ap); /* and copy data to array */ SETV(Pos(bptr), Pos(*ap)); } actmax = MAX(actmax, np - active); /* keep track of max active */ if (np != nptr) /* if new actives listed */ walksub(nptr, np, cptr, bptr, p, psize, pmid); /* then visit next level */ else { /* else no actives left, so */ if (Type(p) != BODY) /* must have found a body */ error("walktree: recursion terminated with cell\n"); gravsum((bodyptr) p, cptr, bptr); /* sum force on the body */ } } } #if defined(QUICKSCAN) /* * ACCEPT: quick criterion accepts any cell not touching cell p. */ local bool accept(nodeptr c, real psize, vector pmid) { real p15, dk; p15 = ((real) 1.5) * psize; /* premultiply cell size */ dk = Pos(c)[0] - pmid[0]; /* find distance to midpnt */ if (ABS(dk) > p15) /* if c does not touch p */ return (TRUE); /* then accept interaction */ dk = Pos(c)[1] - pmid[1]; /* find distance to midpnt */ if (ABS(dk) > p15) /* if c does not touch p */ return (TRUE); /* then accept interaction */ dk = Pos(c)[2] - pmid[2]; /* find distance to midpnt */ if (ABS(dk) > p15) /* if c does not touch p */ return (TRUE); /* then accept interaction */ return (FALSE); /* else do not accept it */ } #else /* * ACCEPT: standard criterion accepts cell if its critical radius * does not intersect cell p, and also imposes above condition. */ local bool accept(nodeptr c, real psize, vector pmid) { real dmax, dsq, dk; int k; dmax = psize; /* init maximum distance */ dsq = 0.0; /* and squared min distance */ #pragma omp parallel for private(k) for (k = 0; k < NDIM; k++) { /* loop over space dims */ dk = Pos(c)[k] - pmid[k]; /* form distance to midpnt */ if (dk < 0) /* and get absolute value */ dk = - dk; if (dk > dmax) /* keep track of max value */ dmax = dk; dk -= ((real) 0.5) * psize; /* allow for size of cell */ if (dk > 0) dsq += dk * dk; /* sum min dist to cell ^2 */ } return (dsq > Rcrit2(c) && /* test angular criterion */ dmax > ((real) 1.5) * psize); /* and adjacency criterion */ } #endif /* * WALKSUB: test next level's active list against subnodes of p. */ local void walksub(nodeptr *nptr, nodeptr *np, cellptr cptr, cellptr bptr, nodeptr p, real psize, vector pmid) { real poff; nodeptr q; int k; vector nmid; poff = psize / 4; /* precompute mid. offset */ if (Type(p) == CELL) { /* fanout over descendents */ for (q = More(p); q != Next(p); q = Next(q)) { /* loop over all subcells */ for (k = 0; k < NDIM; k++) /* locate each's midpoint */ nmid[k] = pmid[k] + (Pos(q)[k] < pmid[k] ? - poff : poff); walktree(nptr, np, cptr, bptr, q, psize / 2, nmid); /* recurse on subcell */ } } else { /* extend virtual tree */ for (k = 0; k < NDIM; k++) /* locate next midpoint */ nmid[k] = pmid[k] + (Pos(p)[k] < pmid[k] ? - poff : poff); walktree(nptr, np, cptr, bptr, p, psize / 2, nmid); /* and search next level */ } } /* * GRAVSUM: compute gravitational field at body p0. 
*/ local void gravsum(bodyptr p0, cellptr cptr, cellptr bptr) { vector pos0, acc0; real phi0; SETV(pos0, Pos(p0)); /* copy position of body */ phi0 = 0.0; /* init total potential */ CLRV(acc0); /* and total acceleration */ if (usequad) /* if using quad moments */ sumcell(interact, cptr, pos0, &phi0, acc0); /* sum cell forces w quads */ else /* not using quad moments */ sumnode(interact, cptr, pos0, &phi0, acc0); /* sum cell forces wo quads */ sumnode(bptr, interact + actlen, pos0, &phi0, acc0); /* sum forces from bodies */ Phi(p0) = phi0; /* store total potential */ SETV(Acc(p0), acc0); /* and total acceleration */ nbbcalc += interact + actlen - bptr; /* count body-body forces */ nbccalc += cptr - interact; /* count body-cell forces */ } /* * SUMNODE: add up body-node interactions. */ local void sumnode(cellptr start, cellptr finish, vector pos0, real *phi0, vector acc0) { cellptr p; real eps2, dr2, drab, phi_p, mr3i; vector dr; eps2 = eps * eps; /* avoid extra multiplys */ for (p = start; p < finish; p++) { /* loop over node list */ DOTPSUBV(dr2, dr, Pos(p), pos0); /* compute separation */ /* and distance squared */ dr2 += eps2; /* add standard softening */ drab = rsqrt(dr2); /* form scalar "distance" */ phi_p = Mass(p) / drab; /* get partial potential */ *phi0 -= phi_p; /* decrement tot potential */ mr3i = phi_p / dr2; /* form scale factor for dr */ ADDMULVS(acc0, dr, mr3i); /* sum partial acceleration */ } } /* * SUMCELL: add up body-cell interactions. */ local void sumcell(cellptr start, cellptr finish, vector pos0, real *phi0, vector acc0) { cellptr p; real eps2, dr2, drab, phi_p, mr3i, drqdr, dr5i, phi_q; vector dr, qdr; eps2 = eps * eps; #pragma omp parallel for private(p) for (p = start; p < finish; p++) { /* loop over node list */ DOTPSUBV(dr2, dr, Pos(p), pos0); /* do mono part of force */ dr2 += eps2; drab = rsqrt(dr2); phi_p = Mass(p) / drab; mr3i = phi_p / dr2; DOTPMULMV(drqdr, qdr, Quad(p), dr); /* do quad part of force */ dr5i = ((real) 1.0) / (dr2 * dr2 * drab); phi_q = ((real) 0.5) * dr5i * drqdr; *phi0 -= phi_p + phi_q; /* add mono and quad pot */ mr3i += ((real) 5.0) * phi_q / dr2; ADDMULVS2(acc0, dr, mr3i, qdr, -dr5i); /* add mono and quad acc */ } }
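A self-contained sketch of the body-node summation pattern used in sumnode(), with the scalar accumulators combined through an OpenMP reduction rather than shared updates; plain arrays replace the treecode's body/cell types, and the softening length and particle distribution are made up:

#include <stdio.h>
#include <math.h>

#define N 1000

int main(void)
{
    static double mass[N], pos[N][3];
    double p0[3] = {0.0, 0.0, 0.0};              /* evaluation point */
    double eps = 0.025, eps2 = eps * eps;
    double phi = 0.0, ax = 0.0, ay = 0.0, az = 0.0;
    int i;
    for (i = 0; i < N; i++) {                    /* made-up particle data */
        mass[i] = 1.0 / N;
        pos[i][0] = cos(0.1 * i);
        pos[i][1] = sin(0.1 * i);
        pos[i][2] = 0.01 * i;
    }
    /* softened monopole sum; the four accumulators are reduced across threads */
    #pragma omp parallel for reduction(+:phi,ax,ay,az)
    for (i = 0; i < N; i++) {
        double dx = pos[i][0] - p0[0];
        double dy = pos[i][1] - p0[1];
        double dz = pos[i][2] - p0[2];
        double dr2 = dx * dx + dy * dy + dz * dz + eps2;
        double drab = sqrt(dr2);
        double phi_p = mass[i] / drab;
        double mr3i = phi_p / dr2;
        phi -= phi_p;
        ax += mr3i * dx;
        ay += mr3i * dy;
        az += mr3i * dz;
    }
    printf("phi = %g  acc = (%g, %g, %g)\n", phi, ax, ay, az);
    return 0;
}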
FeatureLPPooling.c
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/FeatureLPPooling.c" #else #ifndef FEATURE_LP_DEFS #define FEATURE_LP_DEFS #ifdef _MSC_VER #define FEATURE_LP_SIZE_TYPE int64_t #define FEATURE_LP_CAST_TYPE (int64_t) #else #define FEATURE_LP_SIZE_TYPE size_t #define FEATURE_LP_CAST_TYPE #endif typedef struct { size_t size[4]; size_t stride[4]; } FeatureLPPoolingSizes; static inline size_t flpGetOffset(FeatureLPPoolingSizes* s, FEATURE_LP_SIZE_TYPE batch, FEATURE_LP_SIZE_TYPE feature, FEATURE_LP_SIZE_TYPE opt1, FEATURE_LP_SIZE_TYPE opt2) { return s->stride[0] * batch + s->stride[1] * feature + s->stride[2] * opt1 + s->stride[3] * opt2; } static inline size_t flpOutputSize(FEATURE_LP_SIZE_TYPE inputSize, FEATURE_LP_SIZE_TYPE width, FEATURE_LP_SIZE_TYPE stride) { return ((inputSize - width) / stride) + 1; } #endif // FEATURE_LP_DEFS FeatureLPPoolingSizes THNN_(FeatureLPPooling_upcastCPU)(THTensor* t, bool batchMode) { int dim = THTensor_(nDimension)(t); // Upcast to [batch dim][feature dim][opt dim 1][opt dim 2] FeatureLPPoolingSizes s; for (int i = 0; i < 4; ++i) { s.size[i] = 1; s.stride[i] = 1; } if (dim == 1) { THAssert(!batchMode); // [feature dim] s.size[1] = THTensor_(size)(t, 0); s.stride[1] = THTensor_(stride)(t, 0); } else if (dim == 2) { if (batchMode) { // [batch dim][feature dim] for (int i = 0; i < 2; ++i) { s.size[i] = THTensor_(size)(t, i); s.stride[i] = THTensor_(stride)(t, i); } } else { // [feature dim][opt dim 1] s.size[1] = THTensor_(size)(t, 0); s.stride[1] = THTensor_(stride)(t, 0); s.size[2] = THTensor_(size)(t, 1); s.stride[2] = THTensor_(stride)(t, 1); } } else if (dim == 3) { if (batchMode) { // [batch dim][feature dim][opt dim 1] for (int i = 0; i < 3; ++i) { s.size[i] = THTensor_(size)(t, i); s.stride[i] = THTensor_(stride)(t, i); } } else { // [feature dim][opt dim 1][opt dim 2] for (int i = 1; i < 4; ++i) { s.size[i] = THTensor_(size)(t, i - 1); s.stride[i] = THTensor_(stride)(t, i - 1); } } } else if (dim == 4) { // [batch dim][feature dim][opt dim 1][opt dim 2] THAssert(batchMode); for (int i = 0; i < 4; ++i) { s.size[i] = THTensor_(size)(t, i); s.stride[i] = THTensor_(stride)(t, i); } } return s; } void THNN_(FeatureLPPooling_resizeForOutputCPU)(THTensor* toResize, THTensor* input, bool batchMode, int width, int stride) { int inputDim = THTensor_(nDimension)(input); THAssert(inputDim >= 1 && inputDim <= 4); int64_t outSize = flpOutputSize(THTensor_(size)(input, 0), width, stride); if (batchMode) { THAssert(inputDim > 1); outSize = flpOutputSize(THTensor_(size)(input, 1), width, stride); } else { THAssert(inputDim < 4); } if (inputDim == 1) { THTensor_(resize1d)(toResize, outSize); } else if (inputDim == 2) { if (batchMode) { THTensor_(resize2d)(toResize, THTensor_(size)(input, 0), outSize); } else { THTensor_(resize2d)(toResize, outSize, THTensor_(size)(input, 1)); } } else if (inputDim == 3) { if (batchMode) { THTensor_(resize3d)(toResize, THTensor_(size)(input, 0), outSize, THTensor_(size)(input, 2)); } else { THTensor_(resize3d)(toResize, outSize, THTensor_(size)(input, 1), THTensor_(size)(input, 2)); } } else if (inputDim == 4) { THTensor_(resize4d)(toResize, THTensor_(size)(input, 0), outSize, THTensor_(size)(input, 2), THTensor_(size)(input, 3)); } } // Makes `toResize` the same size/dimensionality as `src` void THNN_(FeatureLPPooling_resizeCPU)(THTensor* toResize, THTensor* src) { int inputDim = THTensor_(nDimension)(src); THAssert(inputDim >= 1 && inputDim <= 4); if (inputDim == 1) { THTensor_(resize1d)(toResize, THTensor_(size)(src, 0)); } else 
if (inputDim == 2) { THTensor_(resize2d)( toResize, THTensor_(size)(src, 0), THTensor_(size)(src, 1)); } else if (inputDim == 3) { THTensor_(resize3d)( toResize, THTensor_(size)(src, 0), THTensor_(size)(src, 1), THTensor_(size)(src, 2)); } else if (inputDim == 4) { THTensor_(resize4d)( toResize, THTensor_(size)(src, 0), THTensor_(size)(src, 1), THTensor_(size)(src, 2), THTensor_(size)(src, 3)); } } void THNN_(FeatureLPPooling_updateOutput)( THNNState *state, THTensor *input, THTensor *output, accreal power, int width, int stride, bool batchMode) { int inputDim = THTensor_(nDimension)(input); if (batchMode) { THArgCheck(inputDim >= 2 && inputDim <= 4, 2, "input must be 2-4 dimensions for batch mode"); } else { THArgCheck(inputDim >= 1 && inputDim <= 3, 2, "input must be 1-3 dimensions for non-batch mode"); } FeatureLPPoolingSizes inputDesc = THNN_(FeatureLPPooling_upcastCPU)(input, batchMode); // Make sure the feature dimension is properly sized THArgCheck(inputDesc.size[1] >= width, 3, "input: feature dimension must be >= width"); // Make sure that width and stride are within range THArgCheck(width >= 2 && width <= 16, 5, "width must be between 2 - 16"); THArgCheck(stride >= 1 && stride <= 4, 6, "stride must be between 1 - 4"); // Resize output THNN_(FeatureLPPooling_resizeForOutputCPU)( output, input, batchMode, width, stride); FeatureLPPoolingSizes outputDesc = THNN_(FeatureLPPooling_upcastCPU)(output, batchMode); real* inputP = THTensor_(data)(input); real* outputP = THTensor_(data)(output); FEATURE_LP_SIZE_TYPE batch, opt1, opt2, outputFeature, i; #pragma omp parallel for for (batch = 0; batch < FEATURE_LP_CAST_TYPE inputDesc.size[0]; ++batch) { for (opt1 = 0; opt1 < FEATURE_LP_CAST_TYPE inputDesc.size[2]; ++opt1) { for (opt2 = 0; opt2 < FEATURE_LP_CAST_TYPE inputDesc.size[3]; ++opt2) { for (outputFeature = 0; outputFeature < FEATURE_LP_CAST_TYPE outputDesc.size[1]; ++outputFeature) { accreal v = (accreal) 0; for (i = 0; i < width; ++i) { FEATURE_LP_SIZE_TYPE inputFeature = outputFeature * stride + i; if (inputFeature >= FEATURE_LP_CAST_TYPE inputDesc.size[1]) { break; } v += pow(inputP[flpGetOffset(&inputDesc, batch, inputFeature, opt1, opt2)], power); } outputP[flpGetOffset(&outputDesc, batch, outputFeature, opt1, opt2)] = pow(v, (accreal) 1 / power); } } } } } void THNN_(FeatureLPPooling_updateGradInput)( THNNState *state, THTensor* gradOutput, THTensor* input, THTensor* output, THTensor* gradInput, accreal power, int width, int stride, bool batchMode) { int inputDim = THTensor_(nDimension)(input); if (batchMode) { THArgCheck(inputDim >= 2 && inputDim <= 4, 3, "input must be 2-4 dimensions for batch mode"); } else { THArgCheck(inputDim >= 1 && inputDim <= 3, 3, "input must be 1-3 dimensions for non-batch mode"); } FeatureLPPoolingSizes inputDesc = THNN_(FeatureLPPooling_upcastCPU)(input, batchMode); FeatureLPPoolingSizes gradOutputDesc = THNN_(FeatureLPPooling_upcastCPU)(gradOutput, batchMode); FeatureLPPoolingSizes outputDesc = THNN_(FeatureLPPooling_upcastCPU)(output, batchMode); // Make sure the feature dimension is properly sized THArgCheck(inputDesc.size[1] >= width, 3, "input: feature dimension must be >= width"); // Make sure that width and stride are within range THArgCheck(width >= 2 && width <= 16, 7, "width must be between 2 - 16"); THArgCheck(stride >= 1 && stride <= 4, 8, "stride must be between 1 - 4"); for (int i = 0; i < 4; ++i) { THAssertMsg(outputDesc.size[i] == gradOutputDesc.size[i], "output and gradOutput sizes do not match"); } // Make sure that the input sizes 
produce the output sizes THArgCheck(flpOutputSize(FEATURE_LP_CAST_TYPE inputDesc.size[1], width, stride) == outputDesc.size[1], 3, "input and output sizes do not match with respect to " "width and stride"); // Resize `gradInput` based on `input` THNN_(FeatureLPPooling_resizeCPU)(gradInput, input); // Zero gradInput for accumulation THTensor_(zero)(gradInput); FeatureLPPoolingSizes gradInputDesc = THNN_(FeatureLPPooling_upcastCPU)(gradInput, batchMode); real* gradOutputP = THTensor_(data)(gradOutput); real* gradInputP = THTensor_(data)(gradInput); real* outputP = THTensor_(data)(output); real* inputP = THTensor_(data)(input); FEATURE_LP_SIZE_TYPE batch, opt1, opt2, outputFeature, i; #pragma omp parallel for for (batch = 0; batch < FEATURE_LP_CAST_TYPE inputDesc.size[0]; ++batch) { for (opt1 = 0; opt1 < FEATURE_LP_CAST_TYPE inputDesc.size[2]; ++opt1) { for (opt2 = 0; opt2 < FEATURE_LP_CAST_TYPE inputDesc.size[3]; ++opt2) { for (outputFeature = 0; outputFeature < FEATURE_LP_CAST_TYPE outputDesc.size[1]; ++outputFeature) { // Load output (f(x_is)). It is possible that this is zero, in // which case we'll ignore this point. real outputV = outputP[ flpGetOffset(&outputDesc, batch, outputFeature, opt1, opt2)]; if (outputV == (real) 0) { continue; } for (i = 0; i < width; ++i) { FEATURE_LP_SIZE_TYPE inputFeature = outputFeature * stride + i; THAssert(inputFeature < inputDesc.size[1]); real gradOutputV = gradOutputP[ flpGetOffset(&gradOutputDesc, batch, outputFeature, opt1, opt2)]; real inputV = inputP[ flpGetOffset(&inputDesc, batch, inputFeature, opt1, opt2)]; // Calculate grad * (x_i / f(x_is))^(p - 1) real v = gradOutputV * pow(inputV / outputV, power - (accreal) 1); gradInputP[ flpGetOffset(&gradInputDesc, batch, inputFeature, opt1, opt2)] += v; } } } } } } #endif
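A stripped-down sketch of the same LP-pooling arithmetic on a single 1-D feature vector (float in place of the real/accreal macros; width, stride and the data are made up), with all per-iteration scratch variables declared inside the parallel loop body:

#include <stdio.h>
#include <math.h>

int main(void)
{
    enum { NFEAT = 10, WIDTH = 3, STRIDE = 2 };
    const int nout = (NFEAT - WIDTH) / STRIDE + 1;   /* flpOutputSize() */
    float in[NFEAT], out[NFEAT];
    double power = 2.0;                              /* L2 pooling */
    int f;
    for (f = 0; f < NFEAT; f++) in[f] = (float)(f + 1);
    #pragma omp parallel for
    for (f = 0; f < nout; f++) {
        double v = 0.0;                              /* per-thread accumulator */
        int i;
        for (i = 0; i < WIDTH; i++)
            v += pow(in[f * STRIDE + i], power);
        out[f] = (float) pow(v, 1.0 / power);
    }
    for (f = 0; f < nout; f++) printf("%g ", out[f]);
    printf("\n");
    return 0;
}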
declare-variant-10.c
/* { dg-do compile } */ /* { dg-additional-options "-foffload=disable -fdump-tree-gimple" } */ /* { dg-additional-options "-mavx512bw" { target { i?86-*-* x86_64-*-* } } } */ #undef i386 void f01 (void); #pragma omp declare variant (f01) match (device={isa(avx512f,avx512bw)}) void f02 (void); void f03 (void); #pragma omp declare variant (f03) match (device={kind("any"),arch(x86_64),isa(avx512f,avx512bw)}) void f04 (void); void f05 (void); #pragma omp declare variant (f05) match (device={kind(gpu)}) void f06 (void); void f07 (void); #pragma omp declare variant (f07) match (device={kind(cpu)}) void f08 (void); void f09 (void); #pragma omp declare variant (f09) match (device={isa(sm_35)}) void f10 (void); void f11 (void); #pragma omp declare variant (f11) match (device={arch("nvptx")}) void f12 (void); void f13 (void); #pragma omp declare variant (f13) match (device={arch(i386),isa("sse4")}) void f14 (void); void f15 (void); #pragma omp declare variant (f15) match (device={isa(sse4,ssse3),arch(i386)}) void f16 (void); void f17 (void); #pragma omp declare variant (f17) match (device={kind(any,fpga)}) void f18 (void); #pragma omp declare target void test1 (void) { int i; f02 (); /* { dg-final { scan-tree-dump-times "f01 \\\(\\\);" 1 "gimple" { target i?86-*-* x86_64-*-* } } } */ /* { dg-final { scan-tree-dump-times "f02 \\\(\\\);" 1 "gimple" { target { ! { i?86-*-* x86_64-*-* } } } } } */ f14 (); /* { dg-final { scan-tree-dump-times "f13 \\\(\\\);" 1 "gimple" { target ia32 } } } */ /* { dg-final { scan-tree-dump-times "f14 \\\(\\\);" 1 "gimple" { target { ! ia32 } } } } */ f18 (); /* { dg-final { scan-tree-dump-times "f18 \\\(\\\);" 1 "gimple" } } */ } #pragma omp end declare target #if defined(__i386__) || defined(__x86_64__) __attribute__((target ("avx512f,avx512bw"))) #endif void test2 (void) { #pragma omp target f04 (); /* { dg-final { scan-tree-dump-times "f03 \\\(\\\);" 1 "gimple" { target { { i?86-*-* x86_64-*-* } && lp64 } } } } */ /* { dg-final { scan-tree-dump-times "f04 \\\(\\\);" 1 "gimple" { target { { ! lp64 } || { ! { i?86-*-* x86_64-*-* } } } } } } */ #pragma omp target f16 (); /* { dg-final { scan-tree-dump-times "f15 \\\(\\\);" 1 "gimple" { target ia32 } } } */ /* { dg-final { scan-tree-dump-times "f16 \\\(\\\);" 1 "gimple" { target { ! ia32 } } } } */ } void test3 (void) { f06 (); /* { dg-final { scan-tree-dump-times "f06 \\\(\\\);" 1 "gimple" { target { ! { nvptx*-*-* amdgcn*-*-* } } } } } */ f08 (); /* { dg-final { scan-tree-dump-times "f07 \\\(\\\);" 1 "gimple" { target { ! { nvptx*-*-* amdgcn*-*-* } } } } } */ } #pragma omp declare target to (test3) void test4 (void) { #pragma omp target f10 (); /* { dg-final { scan-tree-dump-times "f10 \\\(\\\);" 1 "gimple" { target { ! { nvptx*-*-* amdgcn*-*-* } } } } } */ #pragma omp target f12 (); /* { dg-final { scan-tree-dump-times "f12 \\\(\\\);" 1 "gimple" { target { ! { nvptx*-*-* } } } } } */ /* { dg-final { scan-tree-dump-times "f11 \\\(\\\);" 1 "gimple" { target { nvptx*-*-* } } } } */ }
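A minimal runnable sketch of the declare-variant dispatch exercised by this test, using only the device={kind(...)} selector (it needs an OpenMP 5.0 compiler such as GCC 10+; the function names are made up):

/* compile with: gcc -fopenmp variant_demo.c */
#include <stdio.h>

void say_host (void) { printf ("host variant selected\n"); }

#pragma omp declare variant (say_host) match (device={kind(host)})
void say (void) { printf ("base function selected\n"); }

int main (void)
{
  say ();   /* on the host device this call dispatches to say_host */
  return 0;
}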
gsrb.c
//------------------------------------------------------------------------------------------------------------------------------ // Samuel Williams // SWWilliams@lbl.gov // Lawrence Berkeley National Lab //------------------------------------------------------------------------------------------------------------------------------ #include <stdint.h> #include "../timer.h" //------------------------------------------------------------------------------------------------------------------------------ #define MAX(a, b) (((a) > (b)) ? (a) : (b)) #define MIN(a, b) (((a) < (b)) ? (a) : (b)) //------------------------------------------------------------------------------------------------------------------------------ // better solution would be to adapt the box size as the problem shrinks... // i.e. fix unit stride at 4KB and calculate BlockJ = ((STANZA+dim.i-1)/dim.i) // similarly, fix BlockK to get some reuse and have enough tasks... //------------------------------------------------------------------------------------------------------------------------------ // Kludge for now... #define BlockJ 16 #define BlockK 4 //------------------------------------------------------------------------------------------------------------------------------ void __box_smooth_GSRB_multiple(box_type *box, int phi_id, int rhs_id, double a, double b, int s){ int jj,kk; int pencil = box->pencil; int plane = box->plane; int ghosts = box->ghosts; double h2inv = 1.0/(box->h*box->h); double * __restrict__ phi = box->grids[ phi_id] + ghosts*plane + ghosts*pencil + ghosts; // i.e. [0] = first non ghost zone point double * __restrict__ phi_new= box->grids[ phi_id] + ghosts*plane + ghosts*pencil + ghosts; double * __restrict__ rhs = box->grids[ rhs_id] + ghosts*plane + ghosts*pencil + ghosts; double * __restrict__ alpha = box->grids[__alpha ] + ghosts*plane + ghosts*pencil + ghosts; double * __restrict__ beta_i = box->grids[__beta_i] + ghosts*plane + ghosts*pencil + ghosts; double * __restrict__ beta_j = box->grids[__beta_j] + ghosts*plane + ghosts*pencil + ghosts; double * __restrict__ beta_k = box->grids[__beta_k] + ghosts*plane + ghosts*pencil + ghosts; double * __restrict__ lambda = box->grids[__lambda] + ghosts*plane + ghosts*pencil + ghosts; int ghostsToOperateOn=ghosts-1; int ss; int big_box=0; // don't subdivide small boxes into tasks (too much overhead from omp task...) if(box->dim.k>8)big_box=1; if(box->dim.j>8)big_box=1; // do ghosts iterations on this list of tasks... for(ss=s;ss<s+ghosts;ss++,ghostsToOperateOn--){ // iterate through all cache blocks within this box and queue a task... 
for(kk=0-ghostsToOperateOn;kk<box->dim.k+ghostsToOperateOn;kk+=BlockK){ for(jj=0-ghostsToOperateOn;jj<box->dim.j+ghostsToOperateOn;jj+=BlockJ){ #pragma omp task if(big_box) { int i,j,k; int highK,highJ; highK = MIN(kk+BlockK,box->dim.k+ghostsToOperateOn); highJ = MIN(jj+BlockJ,box->dim.j+ghostsToOperateOn); #if defined(__GSRB_CONDITIONAL) #warning GSRB on every point with conditional assignment for Red-Black for(k=kk;k<highK;k++){ for(j=jj;j<highJ;j++){ #pragma simd always for(i=0-ghostsToOperateOn;i<box->dim.i+ghostsToOperateOn;i++){ int ijk = i + j*pencil + k*plane; int doit = ((i^(j^k^ss^1))&1); double helmholtz = a*alpha[ijk]*phi[ijk] -b*h2inv*( beta_i[ijk+1 ]*( phi[ijk+1 ]-phi[ijk ] ) -beta_i[ijk ]*( phi[ijk ]-phi[ijk-1 ] ) +beta_j[ijk+pencil]*( phi[ijk+pencil]-phi[ijk ] ) -beta_j[ijk ]*( phi[ijk ]-phi[ijk-pencil] ) +beta_k[ijk+plane ]*( phi[ijk+plane ]-phi[ijk ] ) -beta_k[ijk ]*( phi[ijk ]-phi[ijk-plane ] ) ); //double delta = doit ? lambda[ijk]*(helmholtz-rhs[ijk]) : 0.0; //phi_new[ijk] = phi[ijk] - delta; phi_new[ijk] = doit ? phi[ijk] - lambda[ijk]*(helmholtz-rhs[ijk]) : phi[ijk]; }}} #elif defined(__GSRB_STRIDE2) #warning GSRB using stride-2 accesses for(k=kk;k<highK;k++){ for(j=jj;j<highJ;j++){ for(i=((j^k^ss^1)&1)+1-ghosts;i<box->dim.i+ghostsToOperateOn;i+=2){ // stride-2 GSRB int ijk = i + j*pencil + k*plane; double helmholtz = a*alpha[ijk]*phi[ijk] -b*h2inv*( beta_i[ijk+1 ]*( phi[ijk+1 ]-phi[ijk ] ) -beta_i[ijk ]*( phi[ijk ]-phi[ijk-1 ] ) +beta_j[ijk+pencil]*( phi[ijk+pencil]-phi[ijk ] ) -beta_j[ijk ]*( phi[ijk ]-phi[ijk-pencil] ) +beta_k[ijk+plane ]*( phi[ijk+plane ]-phi[ijk ] ) -beta_k[ijk ]*( phi[ijk ]-phi[ijk-plane ] ) ); phi_new[ijk] = phi[ijk] - lambda[ijk]*(helmholtz-rhs[ijk]); }}} #elif defined(__GSRB_FP) #warning GSRB using pre-computed 1.0/0.0 FP array for Red-Black for(k=kk;k<highK;k++){int EvenOdd = (k^ss)&1; for(j=jj;j<highJ;j++){ for(i=0-ghostsToOperateOn;i<box->dim.i+ghostsToOperateOn;i++){ int ij = i + j*pencil; int ijk = i + j*pencil + k*plane; double helmholtz = a*alpha[ijk]*phi[ijk] -b*h2inv*( beta_i[ijk+1 ]*( phi[ijk+1 ]-phi[ijk ] ) -beta_i[ijk ]*( phi[ijk ]-phi[ijk-1 ] ) +beta_j[ijk+pencil]*( phi[ijk+pencil]-phi[ijk ] ) -beta_j[ijk ]*( phi[ijk ]-phi[ijk-pencil] ) +beta_k[ijk+plane ]*( phi[ijk+plane ]-phi[ijk ] ) -beta_k[ijk ]*( phi[ijk ]-phi[ijk-plane ] ) ); phi_new[ijk] = phi[ijk] - RedBlack[EvenOdd][ij]*lambda[ijk]*(helmholtz-rhs[ijk]); // compiler seems to get confused unless there are disjoint read/write pointers }}} #else #warning GSRB using if-then-else on loop indices for Red-Black for(k=kk;k<highK;k++){ for(j=jj;j<highJ;j++){ for(i=0-ghostsToOperateOn;i<box->dim.i+ghostsToOperateOn;i++){ if((i^j^k^ss^1)&1){ // looks very clean when [0] is i,j,k=0,0,0 int ijk = i + j*pencil + k*plane; double helmholtz = a*alpha[ijk]*phi[ijk] -b*h2inv*( beta_i[ijk+1 ]*( phi[ijk+1 ]-phi[ijk ] ) -beta_i[ijk ]*( phi[ijk ]-phi[ijk-1 ] ) +beta_j[ijk+pencil]*( phi[ijk+pencil]-phi[ijk ] ) -beta_j[ijk ]*( phi[ijk ]-phi[ijk-pencil] ) +beta_k[ijk+plane ]*( phi[ijk+plane ]-phi[ijk ] ) -beta_k[ijk ]*( phi[ijk ]-phi[ijk-plane ] ) ); phi_new[ijk] = phi[ijk] - lambda[ijk]*(helmholtz-rhs[ijk]); }}}} #endif }}} // If doing communication avoiding, we dependent tasks cannot get too far ahead. // As I have no idea how to perform p2p synchronization among omp tasks, I'll just barrier... 
if(ghostsToOperateOn>0){ #pragma omp taskwait } } // ss } //------------------------------------------------------------------------------------------------------------------------------ void smooth(domain_type * domain, int level, int phi_id, int rhs_id, double a, double b){ int box,s; int ghosts = domain->ghosts; // if communication-avoiding, need RHS for stencils in ghost zones if(ghosts>1)exchange_boundary(domain,level,rhs_id,1,1,1); for(s=0;s<numSmooths;s+=ghosts){ exchange_boundary(domain,level,phi_id,1,ghosts>1,ghosts>1); // corners/edges if doing communication-avoiding... uint64_t _timeStart = CycleTime(); #pragma omp parallel { int box; #pragma omp for private(box) nowait // <<< needs to be omp for rather than single in order to get enough task injection. <<< needs to be no wait to ensure idle cores can grab tasks asap for(box=0;box<domain->subdomains_per_rank;box++){ __box_smooth_GSRB_multiple(&domain->subdomains[box].levels[level],phi_id,rhs_id,a,b,s); } } domain->cycles.smooth[level] += (uint64_t)(CycleTime()-_timeStart); } }
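The kernels above hinge on the red/black parity test (i^j^k^ss^1)&1. A self-contained 2-D analogue of the same checkerboard sweep for a Poisson problem, using one plain parallel for per color instead of the task-blocked, communication-avoiding version; grid size, right-hand side and sweep count are made up:

#include <stdio.h>

#define N 66                          /* interior points plus one ghost layer */

static double phi[N][N], rhs[N][N];

int main(void)
{
    int i, j, sweep, color;
    double h = 1.0 / (N - 1), h2 = h * h;
    for (i = 0; i < N; i++)
        for (j = 0; j < N; j++) { phi[i][j] = 0.0; rhs[i][j] = 1.0; }
    for (sweep = 0; sweep < 10; sweep++) {
        for (color = 0; color < 2; color++) {        /* red pass, then black pass */
            #pragma omp parallel for private(j)
            for (i = 1; i < N - 1; i++)
                for (j = 1; j < N - 1; j++)
                    if (((i ^ j ^ color) & 1) == 0)  /* same parity trick as GSRB */
                        phi[i][j] = 0.25 * (phi[i - 1][j] + phi[i + 1][j]
                                          + phi[i][j - 1] + phi[i][j + 1]
                                          - h2 * rhs[i][j]);
        }
    }
    printf("phi at center = %g\n", phi[N / 2][N / 2]);
    return 0;
}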
GB_unop__one_uint16_uint16.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__one_uint16_uint16 // op(A') function: GB_unop_tran__one_uint16_uint16 // C type: uint16_t // A type: uint16_t // cast: ; // unaryop: cij = 1 #define GB_ATYPE \ uint16_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = 1 ; // casting #define GB_CAST(z, aij) \ ; ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ ; ; \ /* Cx [pC] = op (cast (aij)) */ \ ; ; \ Cx [pC] = 1 ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ONE || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__one_uint16_uint16 ( uint16_t *Cx, // Cx and Ax may be aliased const uint16_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { ; ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__one_uint16_uint16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
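At the user level this generated kernel is reached through GrB_Matrix_apply with the ONE unary operator. A small sketch of that call, assuming the GxB_ONE_UINT16 operator is available in the installed SuiteSparse:GraphBLAS version:

#include <stdio.h>
#include "GraphBLAS.h"

int main (void)
{
    GrB_init (GrB_NONBLOCKING) ;
    GrB_Matrix A, C ;
    GrB_Matrix_new (&A, GrB_UINT16, 4, 4) ;
    GrB_Matrix_new (&C, GrB_UINT16, 4, 4) ;
    GrB_Matrix_setElement_UINT16 (A, 7, 0, 2) ;
    GrB_Matrix_setElement_UINT16 (A, 9, 3, 1) ;
    /* C = one (A): every stored entry of A becomes 1 in C (values are ignored) */
    GrB_Matrix_apply (C, NULL, NULL, GxB_ONE_UINT16, A, NULL) ;
    GrB_Index nvals ;
    GrB_Matrix_nvals (&nvals, C) ;
    printf ("C has %lu entries, all equal to 1\n", (unsigned long) nvals) ;
    GrB_Matrix_free (&A) ;
    GrB_Matrix_free (&C) ;
    GrB_finalize ( ) ;
    return 0 ;
}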
tree.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_TREE_H_ #define LIGHTGBM_TREE_H_ #include <LightGBM/dataset.h> #include <LightGBM/meta.h> #include <LightGBM/random_generator.h> #include <string> #include <map> #include <memory> #include <unordered_map> #include <vector> namespace LightGBM { #define kCategoricalMask (1) #define kDefaultLeftMask (2) /*! * \brief Tree model */ class Tree { public: /*! * \brief Constructor * \param max_leaves The number of max leaves */ explicit Tree(int max_leaves); /*! * \brief Construtor, from a string * \param str Model string * \param used_len used count of str */ Tree(const char* str, size_t* used_len); ~Tree(); /*! * \brief Performing a split on tree leaves. * \param leaf Index of leaf to be split * \param feature Index of feature; the converted index after removing useless features * \param real_feature Index of feature, the original index on data * \param threshold_bin Threshold(bin) of split * \param threshold_double Threshold on feature value * \param left_value Model Left child output * \param right_value Model Right child output * \param left_cnt Count of left child * \param right_cnt Count of right child * \param gain Split gain * \param missing_type missing type * \param default_left default direction for missing value * \return The index of new leaf. */ int Split(int leaf, int feature, int real_feature, uint32_t threshold_bin, double threshold_double, double left_value, double right_value, int left_cnt, int right_cnt, float gain, MissingType missing_type, bool default_left); /*! * \brief Performing a split on tree leaves, with categorical feature * \param leaf Index of leaf to be split * \param feature Index of feature; the converted index after removing useless features * \param real_feature Index of feature, the original index on data * \param threshold_bin Threshold(bin) of split, use bitset to represent * \param num_threshold_bin size of threshold_bin * \param threshold Thresholds of real feature value, use bitset to represent * \param num_threshold size of threshold * \param left_value Model Left child output * \param right_value Model Right child output * \param left_cnt Count of left child * \param right_cnt Count of right child * \param gain Split gain * \return The index of new leaf. */ int SplitCategorical(int leaf, int feature, int real_feature, const uint32_t* threshold_bin, int num_threshold_bin, const uint32_t* threshold, int num_threshold, double left_value, double right_value, int left_cnt, int right_cnt, float gain, MissingType missing_type); /*! \brief Get the output of one leaf */ inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; } /*! \brief Set the output of one leaf */ inline void SetLeafOutput(int leaf, double output) { leaf_value_[leaf] = output; } /*! * \brief Adding prediction value of this tree model to scores * \param data The dataset * \param num_data Number of total data * \param score Will add prediction to score */ void AddPredictionToScore(const Dataset* data, data_size_t num_data, double* score) const; /*! * \brief Adding prediction value of this tree model to scorese * \param data The dataset * \param used_data_indices Indices of used data * \param num_data Number of total data * \param score Will add prediction to score */ void AddPredictionToScore(const Dataset* data, const data_size_t* used_data_indices, data_size_t num_data, double* score) const; /*! 
* \brief Prediction on one record * \param feature_values Feature value of this record * \return Prediction result */ inline double Predict(const double* feature_values) const; inline double PredictByMap(const std::unordered_map<int, double>& feature_values) const; inline int PredictLeafIndex(const double* feature_values) const; inline int PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const; inline void PredictContrib(const double* feature_values, int num_features, double* output); /*! \brief Get Number of leaves*/ inline int num_leaves() const { return num_leaves_; } /*! \brief Get depth of specific leaf*/ inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; } /*! \brief Get feature of specific split*/ inline int split_feature(int split_idx) const { return split_feature_[split_idx]; } inline double split_gain(int split_idx) const { return split_gain_[split_idx]; } /*! \brief Get the number of data points that fall at or below this node*/ inline int data_count(int node) const { return node >= 0 ? internal_count_[node] : leaf_count_[~node]; } /*! * \brief Shrinkage for the tree's output * shrinkage rate (a.k.a learning rate) is used to tune the traning process * \param rate The factor of shrinkage */ inline void Shrinkage(double rate) { #pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048) for (int i = 0; i < num_leaves_; ++i) { leaf_value_[i] *= rate; } shrinkage_ *= rate; } inline void proportional_prune(int iter, float base, std::string boost_method, float g_m, int geo_clip) { float threshold; // if(iter <= 20) threshold = 1; // else if(boost_method == std::string("DPBoost") || boost_method == std::string("DPBoost_bagging") || boost_method == std::string("DPBoost_2level")) { if(geo_clip) threshold = (float) std::pow(base, iter) * g_m; else threshold = 1000; } else{ threshold = 1; } // threshold = 1; // float threshold = 0.1; for (int i = 0; i < num_leaves_; ++i) { // std::cout<<leaf_value_[i]<<" "; if(threshold < std::fabs(leaf_value_[i])){ leaf_value_[i] = leaf_value_[i] > 0 ? threshold : -threshold; } // std::cout<<leaf_value_[i]<<" "; } } inline void add_noise(float scale, int seed){ for(int i = 0; i < num_leaves_; i++) { Laplace lap(scale, seed); std::cout<<leaf_value_[i]<<" "; float noise = lap.return_a_random_variable(); leaf_value_[i] += noise; std::cout<<leaf_value_[i]<<" "; } } inline void add_noise(Laplace& lap, float scale){ // Laplace lap(scale, seed); for(int i = 0; i < num_leaves_; i++) { // int seed = std::chrono::system_clock::now().time_since_epoch().count(); // Laplace lap(scale, seed); // std::cout<<leaf_value_[i]<<" "; float noise = lap.return_a_random_variable(scale); leaf_value_[i] += noise; // std::cout<<leaf_value_[i]<<" "; } } inline double shrinkage() const { return shrinkage_; } inline void AddBias(double val) { #pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048) for (int i = 0; i < num_leaves_; ++i) { leaf_value_[i] = val + leaf_value_[i]; } // force to 1.0 shrinkage_ = 1.0f; } inline void AsConstantTree(double val) { num_leaves_ = 1; shrinkage_ = 1.0f; leaf_value_[0] = val; } /*! \brief Serialize this object to string*/ std::string ToString() const; /*! \brief Serialize this object to json*/ std::string ToJSON() const; /*! 
\brief Serialize this object to if-else statement*/ std::string ToIfElse(int index, bool predict_leaf_index) const; inline static bool IsZero(double fval) { if (fval > -kZeroThreshold && fval <= kZeroThreshold) { return true; } else { return false; } } inline static bool GetDecisionType(int8_t decision_type, int8_t mask) { return (decision_type & mask) > 0; } inline static void SetDecisionType(int8_t* decision_type, bool input, int8_t mask) { if (input) { (*decision_type) |= mask; } else { (*decision_type) &= (127 - mask); } } inline static int8_t GetMissingType(int8_t decision_type) { return (decision_type >> 2) & 3; } inline static void SetMissingType(int8_t* decision_type, int8_t input) { (*decision_type) &= 3; (*decision_type) |= (input << 2); } void RecomputeMaxDepth(); private: std::string NumericalDecisionIfElse(int node) const; std::string CategoricalDecisionIfElse(int node) const; inline int NumericalDecision(double fval, int node) const { uint8_t missing_type = GetMissingType(decision_type_[node]); if (std::isnan(fval)) { if (missing_type != 2) { fval = 0.0f; } } if ((missing_type == 1 && IsZero(fval)) || (missing_type == 2 && std::isnan(fval))) { if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) { return left_child_[node]; } else { return right_child_[node]; } } if (fval <= threshold_[node]) { return left_child_[node]; } else { return right_child_[node]; } } inline int NumericalDecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const { uint8_t missing_type = GetMissingType(decision_type_[node]); if ((missing_type == 1 && fval == default_bin) || (missing_type == 2 && fval == max_bin)) { if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) { return left_child_[node]; } else { return right_child_[node]; } } if (fval <= threshold_in_bin_[node]) { return left_child_[node]; } else { return right_child_[node]; } } inline int CategoricalDecision(double fval, int node) const { uint8_t missing_type = GetMissingType(decision_type_[node]); int int_fval = static_cast<int>(fval); if (int_fval < 0) { return right_child_[node];; } else if (std::isnan(fval)) { // NaN is always in the right if (missing_type == 2) { return right_child_[node]; } int_fval = 0; } int cat_idx = static_cast<int>(threshold_[node]); if (Common::FindInBitset(cat_threshold_.data() + cat_boundaries_[cat_idx], cat_boundaries_[cat_idx + 1] - cat_boundaries_[cat_idx], int_fval)) { return left_child_[node]; } return right_child_[node]; } inline int CategoricalDecisionInner(uint32_t fval, int node) const { int cat_idx = static_cast<int>(threshold_in_bin_[node]); if (Common::FindInBitset(cat_threshold_inner_.data() + cat_boundaries_inner_[cat_idx], cat_boundaries_inner_[cat_idx + 1] - cat_boundaries_inner_[cat_idx], fval)) { return left_child_[node]; } return right_child_[node]; } inline int Decision(double fval, int node) const { if (GetDecisionType(decision_type_[node], kCategoricalMask)) { return CategoricalDecision(fval, node); } else { return NumericalDecision(fval, node); } } inline int DecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const { if (GetDecisionType(decision_type_[node], kCategoricalMask)) { return CategoricalDecisionInner(fval, node); } else { return NumericalDecisionInner(fval, node, default_bin, max_bin); } } inline void Split(int leaf, int feature, int real_feature, double left_value, double right_value, int left_cnt, int right_cnt, float gain); /*! 
* \brief Find leaf index of which record belongs by features * \param feature_values Feature value of this record * \return Leaf index */ inline int GetLeaf(const double* feature_values) const; inline int GetLeafByMap(const std::unordered_map<int, double>& feature_values) const; /*! \brief Serialize one node to json*/ std::string NodeToJSON(int index) const; /*! \brief Serialize one node to if-else statement*/ std::string NodeToIfElse(int index, bool predict_leaf_index) const; std::string NodeToIfElseByMap(int index, bool predict_leaf_index) const; double ExpectedValue() const; /*! \brief This is used fill in leaf_depth_ after reloading a model*/ inline void RecomputeLeafDepths(int node = 0, int depth = 0); /*! * \brief Used by TreeSHAP for data we keep about our decision path */ struct PathElement { int feature_index; double zero_fraction; double one_fraction; // note that pweight is included for convenience and is not tied with the other attributes, // the pweight of the i'th path element is the permuation weight of paths with i-1 ones in them double pweight; PathElement() {} PathElement(int i, double z, double o, double w) : feature_index(i), zero_fraction(z), one_fraction(o), pweight(w) {} }; /*! \brief Polynomial time algorithm for SHAP values (arXiv:1706.06060)*/ void TreeSHAP(const double *feature_values, double *phi, int node, int unique_depth, PathElement *parent_unique_path, double parent_zero_fraction, double parent_one_fraction, int parent_feature_index) const; /*! \brief Extend our decision path with a fraction of one and zero extensions for TreeSHAP*/ static void ExtendPath(PathElement *unique_path, int unique_depth, double zero_fraction, double one_fraction, int feature_index); /*! \brief Undo a previous extension of the decision path for TreeSHAP*/ static void UnwindPath(PathElement *unique_path, int unique_depth, int path_index); /*! determine what the total permuation weight would be if we unwound a previous extension in the decision path*/ static double UnwoundPathSum(const PathElement *unique_path, int unique_depth, int path_index); /*! \brief Number of max leaves*/ int max_leaves_; /*! \brief Number of current levas*/ int num_leaves_; // following values used for non-leaf node /*! \brief A non-leaf node's left child */ std::vector<int> left_child_; /*! \brief A non-leaf node's right child */ std::vector<int> right_child_; /*! \brief A non-leaf node's split feature */ std::vector<int> split_feature_inner_; /*! \brief A non-leaf node's split feature, the original index */ std::vector<int> split_feature_; /*! \brief A non-leaf node's split threshold in bin */ std::vector<uint32_t> threshold_in_bin_; /*! \brief A non-leaf node's split threshold in feature value */ std::vector<double> threshold_; int num_cat_; std::vector<int> cat_boundaries_inner_; std::vector<uint32_t> cat_threshold_inner_; std::vector<int> cat_boundaries_; std::vector<uint32_t> cat_threshold_; /*! \brief Store the information for categorical feature handle and mising value handle. */ std::vector<int8_t> decision_type_; /*! \brief A non-leaf node's split gain */ std::vector<float> split_gain_; // used for leaf node /*! \brief The parent of leaf */ std::vector<int> leaf_parent_; /*! \brief Output of leaves */ std::vector<double> leaf_value_; /*! \brief DataCount of leaves */ std::vector<int> leaf_count_; /*! \brief Output of non-leaf nodes */ std::vector<double> internal_value_; /*! \brief DataCount of non-leaf nodes */ std::vector<int> internal_count_; /*! 
\brief Depth for leaves */ std::vector<int> leaf_depth_; double shrinkage_; int max_depth_; }; inline void Tree::Split(int leaf, int feature, int real_feature, double left_value, double right_value, int left_cnt, int right_cnt, float gain) { int new_node_idx = num_leaves_ - 1; // update parent info int parent = leaf_parent_[leaf]; if (parent >= 0) { // if cur node is left child if (left_child_[parent] == ~leaf) { left_child_[parent] = new_node_idx; } else { right_child_[parent] = new_node_idx; } } // add new node split_feature_inner_[new_node_idx] = feature; split_feature_[new_node_idx] = real_feature; split_gain_[new_node_idx] = Common::AvoidInf(gain); // add two new leaves left_child_[new_node_idx] = ~leaf; right_child_[new_node_idx] = ~num_leaves_; // update new leaves leaf_parent_[leaf] = new_node_idx; leaf_parent_[num_leaves_] = new_node_idx; // save current leaf value to internal node before change internal_value_[new_node_idx] = leaf_value_[leaf]; internal_count_[new_node_idx] = left_cnt + right_cnt; leaf_value_[leaf] = std::isnan(left_value) ? 0.0f : left_value; leaf_count_[leaf] = left_cnt; leaf_value_[num_leaves_] = std::isnan(right_value) ? 0.0f : right_value; leaf_count_[num_leaves_] = right_cnt; // update leaf depth leaf_depth_[num_leaves_] = leaf_depth_[leaf] + 1; leaf_depth_[leaf]++; } inline double Tree::Predict(const double* feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeaf(feature_values); return LeafOutput(leaf); } else { return leaf_value_[0]; } } inline double Tree::PredictByMap(const std::unordered_map<int, double>& feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeafByMap(feature_values); return LeafOutput(leaf); } else { return leaf_value_[0]; } } inline int Tree::PredictLeafIndex(const double* feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeaf(feature_values); return leaf; } else { return 0; } } inline int Tree::PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeafByMap(feature_values); return leaf; } else { return 0; } } inline void Tree::PredictContrib(const double* feature_values, int num_features, double* output) { output[num_features] += ExpectedValue(); // Run the recursion with preallocated space for the unique path data if (num_leaves_ > 1) { CHECK(max_depth_ >= 0); const int max_path_len = max_depth_ + 1; std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2); TreeSHAP(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1); } } inline void Tree::RecomputeLeafDepths(int node, int depth) { if (node == 0) leaf_depth_.resize(num_leaves()); if (node < 0) { leaf_depth_[~node] = depth; } else { RecomputeLeafDepths(left_child_[node], depth + 1); RecomputeLeafDepths(right_child_[node], depth + 1); } } inline int Tree::GetLeaf(const double* feature_values) const { int node = 0; if (num_cat_ > 0) { while (node >= 0) { node = Decision(feature_values[split_feature_[node]], node); } } else { while (node >= 0) { node = NumericalDecision(feature_values[split_feature_[node]], node); } } return ~node; } inline int Tree::GetLeafByMap(const std::unordered_map<int, double>& feature_values) const { int node = 0; if (num_cat_ > 0) { while (node >= 0) { node = Decision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node); } } else { while (node >= 0) { node = NumericalDecision(feature_values.count(split_feature_[node]) > 0 ? 
feature_values.at(split_feature_[node]) : 0.0f, node); } } return ~node; } } // namespace LightGBM #endif // LightGBM_TREE_H_
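/* A small, self-contained sketch (not part of LightGBM) of the two DPBoost-style
 * steps this header adds to Tree: clipping every leaf output to a geometrically
 * decaying threshold (what proportional_prune does) and perturbing it with Laplace
 * noise (what add_noise does). The Laplace sampler, the constants, and the leaf
 * values below are illustrative assumptions; the header itself relies on an external
 * Laplace class. */
#include <cmath>
#include <cstdio>
#include <random>
#include <vector>

// Laplace(0, scale) draw: an exponential magnitude with a random sign.
static double laplace_sample(double scale, std::mt19937& gen) {
  std::exponential_distribution<double> mag(1.0 / scale);
  std::bernoulli_distribution sign(0.5);
  return sign(gen) ? mag(gen) : -mag(gen);
}

int main() {
  std::vector<double> leaf_value = {0.8, -2.5, 0.1, 3.0};  // made-up leaf outputs
  int iter = 3;
  double base = 0.5, g_m = 4.0;                   // illustrative constants
  double threshold = std::pow(base, iter) * g_m;  // geometric clipping bound
  double scale = 0.25;                            // noise scale (set by the privacy budget)

  std::mt19937 gen(42);
  for (double& v : leaf_value) {
    if (std::fabs(v) > threshold) v = (v > 0 ? threshold : -threshold);  // clip
    v += laplace_sample(scale, gen);                                     // perturb
  }
  for (double v : leaf_value) std::printf("%f\n", v);
  return 0;
}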
stencil.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> #include "malloc2D.h" #include "timer.h" #define SWAP_PTR(xnew,xold,xtmp) (xtmp=xnew, xnew=xold, xold=xtmp) int main(int argc, char *argv[]) { #pragma omp parallel #pragma omp master printf("Running with %d thread(s)\n",omp_get_num_threads()); /* struct timespec tstart_init, tstart_flush, tstart_stencil, tstart_total; double init_time, flush_time, stencil_time, total_time; int imax=2002, jmax = 2002; double** xtmp; double** x = malloc2D(jmax, imax); double** xnew = malloc2D(jmax, imax); int *flush = (int *)malloc(jmax*imax*sizeof(int)*4); // cpu_timer_start(&tstart_total); // cpu_timer_start(&tstart_init); #pragma omp parallel for for (int j = 0; j < jmax; j++){ for (int i = 0; i < imax; i++){ xnew[j][i] = 0.0; x[j][i] = 5.0; } } #pragma omp parallel for for (int j = jmax/2 - 5; j < jmax/2 + 5; j++){ for (int i = imax/2 - 5; i < imax/2 -1; i++){ x[j][i] = 400.0; } } #ifdef XXX // init_time += cpu_timer_stop(tstart_init); for (int iter = 0; iter < 10000; iter++){ // cpu_timer_start(&tstart_flush); #pragma omp parallel for for (int l = 1; l < jmax*imax*4; l++){ flush[l] = 1.0; } // flush_time += cpu_timer_stop(tstart_flush); // cpu_timer_start(&tstart_stencil); #pragma omp parallel for for (int j = 1; j < jmax-1; j++){ for (int i = 1; i < imax-1; i++){ xnew[j][i] = ( x[j][i] + x[j][i-1] + x[j][i+1] + x[j-1][i] + x[j+1][i] )/5.0; } } // stencil_time += cpu_timer_stop(tstart_stencil); SWAP_PTR(xnew, x, xtmp); if (iter%1000 == 0) printf("Iter %d\n",iter); } // total_time += cpu_timer_stop(tstart_total); // printf("Timing is init %f flush %f stencil %f total %f\n", // init_time,flush_time,stencil_time,total_time); #endif */ }
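/* The stencil loop in stencil.c above is left commented out; this is a minimal,
 * self-contained sketch of the same 5-point averaging update with the pointer swap,
 * using flat 1-D indexing so it needs neither malloc2D.h nor the timer helpers.
 * Grid size and iteration count are illustrative. */
#include <stdio.h>
#include <stdlib.h>

int main(void) {
   const int jmax = 202, imax = 202, iters = 100;
   double *x    = calloc((size_t)jmax * imax, sizeof(double));
   double *xnew = calloc((size_t)jmax * imax, sizeof(double));
   if (x == NULL || xnew == NULL) return 1;

   for (int j = 0; j < jmax; j++)
      for (int i = 0; i < imax; i++)
         x[j*imax + i] = 5.0;
   x[(jmax/2)*imax + imax/2] = 400.0;              /* a hot spot in the middle */

   for (int iter = 0; iter < iters; iter++){
      #pragma omp parallel for
      for (int j = 1; j < jmax-1; j++)
         for (int i = 1; i < imax-1; i++)
            xnew[j*imax + i] = ( x[j*imax + i] + x[j*imax + i-1] + x[j*imax + i+1]
                               + x[(j-1)*imax + i] + x[(j+1)*imax + i] )/5.0;
      double *xtmp = xnew; xnew = x; x = xtmp;     /* swap old and new grids */
   }
   printf("center value after %d iterations: %f\n", iters, x[(jmax/2)*imax + imax/2]);
   free(x); free(xnew);
   return 0;
}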
seidel-2d.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 20x1000. */ #include "seidel-2d.h" /* Array initialization. */ static void init_array (int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n)) { int i __attribute__((annotate("scalar(range(0, " PB_XSTR(N) ") final)"))); int j __attribute__((annotate("scalar(range(0, " PB_XSTR(N) ") final)"))); for (i = 0; i < n; i++) for (j = 0; j < n; j++) A[i][j] = ((DATA_TYPE) i*(j+2) + 2) / n; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n)) { int i, j; for (i = 0; i < n; i++) for (j = 0; j < n; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, A[i][j]); if ((i * n + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_seidel_2d(int tsteps, int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n)) { int t, i, j; #pragma scop #pragma omp parallel private (t,i,j) { #pragma omp master { for (t = 0; t <= _PB_TSTEPS - 1; t++) { #pragma omp parallel for schedule(static) collapse (2) for (i = 1; i<= _PB_N - 2; i++) { for (j = 1; j <= _PB_N - 2; j++) { A[i][j] = (A[i-1][j-1] + A[i-1][j] + A[i-1][j+1] + A[i][j-1] + A[i][j] + A[i][j+1] + A[i+1][j-1] + A[i+1][j] + A[i+1][j+1])/9.0; } } } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int tsteps = TSTEPS; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE __attribute__((annotate("target('A') scalar()"))), N, N, n, n); /* Initialize array(s). */ init_array (n, POLYBENCH_ARRAY(A)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_seidel_2d (tsteps, n, POLYBENCH_ARRAY(A)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(A))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); return 0; }
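/* The PolyBench macros above hide the plain shape of the kernel; this is a small
 * stand-alone restatement of the Seidel sweep, assuming DATA_TYPE is double and a
 * fixed 8x8 grid. Each sweep reads values already updated earlier in the same sweep
 * (Gauss-Seidel), so the i and j loops carry a dependence. */
#include <stdio.h>

#define N 8
#define TSTEPS 4

int main(void) {
  double A[N][N];
  for (int i = 0; i < N; i++)
    for (int j = 0; j < N; j++)
      A[i][j] = ((double) i*(j+2) + 2) / N;

  for (int t = 0; t < TSTEPS; t++)
    for (int i = 1; i <= N - 2; i++)
      for (int j = 1; j <= N - 2; j++)
        A[i][j] = (A[i-1][j-1] + A[i-1][j] + A[i-1][j+1]
                 + A[i][j-1]   + A[i][j]   + A[i][j+1]
                 + A[i+1][j-1] + A[i+1][j] + A[i+1][j+1]) / 9.0;

  printf("A[1][1] = %f\n", A[1][1]);
  return 0;
}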
SparseTranspose.h
/** * This file contains (modified) code from the Eigen library. * Eigen License: * * Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> * Copyright (C) 2007-2011 Benoit Jacob <jacob.benoit.1@gmail.com> * * This Source Code Form is subject to the terms of the Mozilla * Public License v. 2.0. If a copy of the MPL was not distributed * with this file, You can obtain one at http://mozilla.org/MPL/2.0/. * * * ====================== * * The modifications are part of the Eigen Recursive Matrix Extension (ERME). * ERME License: * * Copyright (c) 2019 Darius Rückert * Licensed under the MIT License. */ #pragma once #include "SparseHelper.h" #include "Transpose.h" #include <iostream> namespace Eigen::Recursive { /** * Sparse Matrix Transposition. * This is basically a copy and paste from Eigen/src/SparseCore/SparseMatrix.h :: operator= * * The only difference is that we call transpose recursivly on each element when assigning them. * * There are also two additional methods that only transpose the structure/values. * This is used for optimization problems with well known structures. In these cases * the structure can be precomputed. * */ template <typename G, typename H, int options> void transpose(const Eigen::SparseMatrix<G, options>& other, Eigen::SparseMatrix<H, options>& dest) { static_assert(options == Eigen::RowMajor, "todo"); using SparseMatrix = Eigen::SparseMatrix<G, Eigen::RowMajor>; using namespace Eigen; // SparseMatrix dest(other.rows(),other.cols()); // dest.resize(other.rows(), other.cols()); dest.resize(other.cols(), other.rows()); Eigen::Map<typename SparseMatrix::IndexVector>(dest.outerIndexPtr(), dest.outerSize()).setZero(); // pass 1 // FIXME the above copy could be merged with that pass for (Index j = 0; j < other.outerSize(); ++j) for (typename SparseMatrix::InnerIterator it(other, j); it; ++it) ++dest.outerIndexPtr()[it.index()]; // prefix sum Index count = 0; typename SparseMatrix::IndexVector positions(dest.outerSize()); for (Index j = 0; j < dest.outerSize(); ++j) { auto tmp = dest.outerIndexPtr()[j]; dest.outerIndexPtr()[j] = count; positions[j] = count; count += tmp; } dest.outerIndexPtr()[dest.outerSize()] = count; // alloc // dest.m_data.resize(count); dest.reserve(count); // pass 2 for (Index j = 0; j < other.outerSize(); ++j) { for (typename SparseMatrix::InnerIterator it(other, j); it; ++it) { Index pos = positions[it.index()]++; dest.innerIndexPtr()[pos] = j; dest.valuePtr()[pos].get() = transpose(it.value()).get(); } } } template <typename G, typename H, int options> void transposeStructureOnly(const Eigen::SparseMatrix<G, options>& other, Eigen::SparseMatrix<H, options>& dest) { static_assert(options == Eigen::RowMajor, "todo"); using SparseMatrix = Eigen::SparseMatrix<G, Eigen::RowMajor>; using namespace Eigen; // SparseMatrix dest(other.rows(),other.cols()); dest.resize(other.cols(), other.rows()); Eigen::Map<typename SparseMatrix::IndexVector>(dest.outerIndexPtr(), dest.outerSize()).setZero(); // pass 1 // FIXME the above copy could be merged with that pass for (Index j = 0; j < other.outerSize(); ++j) for (typename SparseMatrix::InnerIterator it(other, j); it; ++it) ++dest.outerIndexPtr()[it.index()]; // prefix sum Index count = 0; typename SparseMatrix::IndexVector positions(dest.outerSize()); for (Index j = 0; j < dest.outerSize(); ++j) { auto tmp = dest.outerIndexPtr()[j]; dest.outerIndexPtr()[j] = count; positions[j] = count; count += tmp; } dest.outerIndexPtr()[dest.outerSize()] = count; // alloc dest.reserve(count); // pass 2 for (Index j = 
0; j < other.outerSize(); ++j) { // int op = other.outerIndexPtr()[j]; int i = 0; for (typename SparseMatrix::InnerIterator it(other, j); it; ++it, ++i) { Index pos = positions[it.index()]++; dest.innerIndexPtr()[pos] = j; } } } template <typename G, typename H, int options> void transposeStructureOnly_omp(const Eigen::SparseMatrix<G, options>& other, Eigen::SparseMatrix<H, options>& dest, std::vector<int>& transposeTargets) { static_assert(options == Eigen::RowMajor, "todo"); using SparseMatrix = Eigen::SparseMatrix<G, Eigen::RowMajor>; using namespace Eigen; // SparseMatrix dest(other.rows(),other.cols()); dest.resize(other.cols(), other.rows()); Eigen::Map<typename SparseMatrix::IndexVector>(dest.outerIndexPtr(), dest.outerSize()).setZero(); // pass 1 // FIXME the above copy could be merged with that pass for (Index j = 0; j < other.outerSize(); ++j) for (typename SparseMatrix::InnerIterator it(other, j); it; ++it) ++dest.outerIndexPtr()[it.index()]; // prefix sum Index count = 0; typename SparseMatrix::IndexVector positions(dest.outerSize()); for (Index j = 0; j < dest.outerSize(); ++j) { auto tmp = dest.outerIndexPtr()[j]; dest.outerIndexPtr()[j] = count; positions[j] = count; count += tmp; } dest.outerIndexPtr()[dest.outerSize()] = count; // alloc dest.reserve(count); transposeTargets.resize(count); // pass 2 for (Index j = 0; j < other.outerSize(); ++j) { int op = other.outerIndexPtr()[j]; int i = 0; for (typename SparseMatrix::InnerIterator it(other, j); it; ++it, ++i) { int rel = op + i; Index pos = positions[it.index()]++; transposeTargets[rel] = pos; dest.innerIndexPtr()[pos] = j; } } } template <typename G, typename H, int options> void transposeValueOnly(const Eigen::SparseMatrix<G, options>& other, Eigen::SparseMatrix<H, options>& dest) { static_assert(options == Eigen::RowMajor, "todo"); using SparseMatrix = Eigen::SparseMatrix<G, Eigen::RowMajor>; using namespace Eigen; std::vector<int> positions(dest.outerSize(), 0); for (Index j = 0; j < other.outerSize(); ++j) { for (typename SparseMatrix::InnerIterator it(other, j); it; ++it) { Index pos = dest.outerIndexPtr()[it.index()] + positions[it.index()]++; dest.valuePtr()[pos].get() = transpose(it.value()).get(); } } } template <typename G, typename H, int options> void transposeValueOnly_omp(const Eigen::SparseMatrix<G, options>& other, Eigen::SparseMatrix<H, options>& dest, const std::vector<int>& transposeTargets) { static_assert(options == Eigen::RowMajor, "todo"); using SparseMatrix = Eigen::SparseMatrix<G, Eigen::RowMajor>; using namespace Eigen; // std::vector<int> positions(dest.outerSize(), 0); #pragma omp for for (Index j = 0; j < other.outerSize(); ++j) { int op = other.outerIndexPtr()[j]; int i = 0; for (typename SparseMatrix::InnerIterator it(other, j); it; ++it, ++i) { int rel = op + i; int pos = transposeTargets[rel]; // Index pos = dest.outerIndexPtr()[it.index()] + positions[it.index()]++; dest.valuePtr()[pos].get() = transpose(it.value()).get(); } } } } // namespace Eigen::Recursive
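/* The transpose routines above are a two-pass counting scatter over a row-major
 * (CSR) layout: count the entries per output row, prefix-sum the counts into
 * offsets, then scatter. A minimal sketch of the same idea on plain CSR arrays,
 * structure only (as in transposeStructureOnly); the 3x4 example matrix is made up. */
#include <stdio.h>

int main(void) {
  int rows = 3, cols = 4;
  int row_ptr[] = {0, 2, 3, 5};        /* CSR row offsets     */
  int col_idx[] = {0, 2, 1, 0, 3};     /* CSR column indices  */
  int nnz = row_ptr[rows];

  int t_ptr[5] = {0};                  /* cols + 1 offsets of the transpose */
  int t_idx[5];                        /* nnz row indices of the transpose  */
  int positions[4];

  /* pass 1: count entries per column (= per row of the transpose) */
  for (int k = 0; k < nnz; k++) t_ptr[col_idx[k]]++;

  /* prefix sum the counts into offsets */
  int count = 0;
  for (int j = 0; j < cols; j++) {
    int tmp = t_ptr[j];
    t_ptr[j] = count;
    positions[j] = count;
    count += tmp;
  }
  t_ptr[cols] = count;

  /* pass 2: scatter the row index of each entry into the transposed structure */
  for (int i = 0; i < rows; i++)
    for (int k = row_ptr[i]; k < row_ptr[i+1]; k++)
      t_idx[positions[col_idx[k]]++] = i;

  for (int j = 0; j <= cols; j++) printf("%d ", t_ptr[j]);
  printf("\n");
  for (int k = 0; k < nnz; k++) printf("%d ", t_idx[k]);
  printf("\n");
  return 0;
}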
GB_unaryop__ainv_uint8_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_uint8_fp32 // op(A') function: GB_tran__ainv_uint8_fp32 // C type: uint8_t // A type: float // cast: uint8_t cij ; GB_CAST_UNSIGNED(cij,aij,8) // unaryop: cij = -aij #define GB_ATYPE \ float #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, aij) \ uint8_t z ; GB_CAST_UNSIGNED(z,aij,8) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_uint8_fp32 ( uint8_t *Cx, // Cx and Ax may be aliased float *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_uint8_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
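/* What the generated kernel above amounts to once the macros are expanded: typecast
 * each float entry to uint8_t, store its additive inverse, and split the loop across
 * threads. The plain C cast below stands in for GB_CAST_UNSIGNED, whose exact NaN and
 * out-of-range handling is defined elsewhere in the library; the input values are
 * illustrative. */
#include <stdint.h>
#include <stdio.h>

static void ainv_uint8_fp32 (uint8_t *Cx, const float *Ax, int64_t anz)
{
    int64_t p ;
    #pragma omp parallel for schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint8_t z = (uint8_t) Ax [p] ;   /* cast stand-in for GB_CAST_UNSIGNED */
        Cx [p] = (uint8_t) (-z) ;        /* unsigned negation wraps modulo 256 */
    }
}

int main (void)
{
    float   Ax [4] = {0.0f, 1.0f, 2.5f, 255.0f} ;
    uint8_t Cx [4] ;
    ainv_uint8_fp32 (Cx, Ax, 4) ;
    for (int i = 0 ; i < 4 ; i++) printf ("%d ", Cx [i]) ;   /* 0 255 254 1 */
    printf ("\n") ;
    return (0) ;
}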
carray_test.c
/* --------------------------------------------------------------------------- carray_test.c This file is part of Ruby/CArray extension library. Copyright (C) 2005-2020 Hiroki Motoyoshi This file includes the modified routine (ca_mem_hash) from * string.c in Ruby distribution ( ruby-1.8.6 ) Copyright (C) 1993-2003 Yukihiro Matsumoto Copyright (C) 2000 Network Applied Communication Laboratory, Inc. Copyright (C) 2000 Information-technology Promotion Agency, Japan ---------------------------------------------------------------------------- */ #include "carray.h" static ID id_equal; /* various checking routine */ void ca_check_type (void *ap, int8_t data_type) { CArray *ca = (CArray *) ap; if ( ca->data_type != data_type ) { rb_raise(rb_eCADataTypeError, "data_type mismatch"); } } void ca_check_ndim (void *ap, int ndim) { CArray *ca = (CArray *) ap; if ( ! ca_is_scalar(ca) ) { if ( ca->ndim != ndim ) { rb_raise(rb_eRuntimeError, "ndim mismatch"); } } } void ca_check_shape (void *ap, int ndim, ca_size_t *dim) { CArray *ca = (CArray *) ap; int i; if ( ! ca_is_scalar(ca) ) { if ( ca->ndim != ndim ) { rb_raise(rb_eRuntimeError, "shape mismatch"); } for (i=0; i<ndim; i++) { if ( ca->dim[i] != dim[i] ) { rb_raise(rb_eRuntimeError, "shape mismatch"); } } } } void ca_check_same_data_type (void *ap1, void *ap2) { CArray *ca1 = (CArray *) ap1; CArray *ca2 = (CArray *) ap2; if ( ca1->data_type != ca2->data_type ) { rb_raise(rb_eCADataTypeError, "data_type mismatch"); } } void ca_check_same_ndim (void *ap1, void *ap2) { CArray *ca1 = (CArray *) ap1; CArray *ca2 = (CArray *) ap2; if ( ca1->ndim != ca2->ndim ) { rb_raise(rb_eRuntimeError, "ndim mismatch"); } } void ca_check_same_elements (void *ap1, void *ap2) { CArray *ca1 = (CArray *) ap1; CArray *ca2 = (CArray *) ap2; if ( ca1->elements != ca2->elements ) { rb_raise(rb_eRuntimeError, "elements mismatch"); } } void ca_check_same_shape (void *ap1, void *ap2) { CArray *ca1 = (CArray *) ap1; CArray *ca2 = (CArray *) ap2; int i; if ( ( ! ca_is_scalar(ca1) ) && ( ! ca_is_scalar(ca2) ) ) { if ( ca1->ndim != ca2->ndim ) { rb_raise(rb_eRuntimeError, "shape mismatch"); } for (i=0; i<ca1->ndim; i++) { if ( ca1->dim[i] != ca2->dim[i] ) { rb_raise(rb_eRuntimeError, "shape mismatch"); } } } } void ca_check_index (void *ap, ca_size_t *idx) { CArray *ca = (CArray *) ap; int i; for (i=0; i<ca->ndim; i++) { if ( idx[i] < 0 || idx[i] >= ca->dim[i] ) { rb_raise(rb_eRuntimeError, "invalid index"); } } } void rb_check_carray_object (VALUE arg) { if ( ! rb_obj_is_carray(arg) ) { rb_raise(rb_eRuntimeError, "CArray required"); } } /* various predicate routine */ int ca_has_same_shape (void *ap1, void *ap2) { CArray *ca1 = (CArray *) ap1; CArray *ca2 = (CArray *) ap2; int i; if ( ca_is_scalar(ca1) || ca_is_scalar(ca2) ) { return 1; } else if ( ca1->ndim != ca2->ndim ) { return 0; } else { for (i=0; i<ca1->ndim; i++) { if ( ca1->dim[i] != ca2->dim[i] ) { return 0; } } return 1; } } int ca_is_valid_index (void *ap, ca_size_t *idx) { CArray *ca = (CArray *) ap; int8_t i; for (i=0; i<ca->ndim; i++) { if ( idx[i] < 0 || idx[i] >= ca->dim[i] ) { return 0; } } return 1; } /* predicate whether data_type is integer or not */ int rb_ca_is_type (VALUE arg, int type) { CArray *ca; if ( ! rb_obj_is_carray(arg) ) { rb_raise(rb_eRuntimeError, "CArray required"); } Data_Get_Struct(arg, CArray, ca); return ca->data_type == type; } /* ------------------------------------------------------------- */ void ca_check_data_class (VALUE rtype) { if ( ! 
rb_obj_is_data_class(rtype) ) { VALUE inspect = rb_inspect(rtype); rb_raise(rb_eRuntimeError, "<%s> is not a data_class, which should has the features\n" \ " * constant data_class::DATA_SIZE -> integer\n" \ " * constant data_class::MEMBERS -> array of string\n" \ " * constant data_class::MEMBER_TABLE -> hash\n" \ " * method data_class.decode(str) -> data_class object\n" \ " * method data_class#encode() -> string", StringValuePtr(inspect)); } } VALUE rb_obj_is_data_class (VALUE rtype) { VALUE has_data_size, has_member_names, has_member_table; VALUE has_encode, has_decode; if ( TYPE(rtype) == T_CLASS ) { has_data_size = rb_funcall(rtype, rb_intern("const_defined?"), 1, rb_str_new2("DATA_SIZE")); has_member_names = rb_funcall(rtype, rb_intern("const_defined?"), 1, rb_str_new2("MEMBERS")); has_member_table = rb_funcall(rtype, rb_intern("const_defined?"), 1, rb_str_new2("MEMBER_TABLE")); has_encode = rb_funcall(rtype, rb_intern("method_defined?"), 1, rb_str_new2("encode")); has_decode = rb_respond_to(rtype, rb_intern("decode")); return ( RTEST(has_data_size) && RTEST(has_member_table) && RTEST(has_member_names) && RTEST(has_encode) && RTEST(has_decode) ) ? Qtrue : Qfalse; } return Qfalse; } static VALUE rb_ca_s_is_data_class (VALUE self, VALUE rklass) { return rb_obj_is_data_class(rklass); } /* ------------------------------------------------------------- */ /* @overload valid_index? (*idx) (Inquiry) Returns true if the given number list is valid as array index for the object */ static VALUE rb_ca_is_valid_index (int argc, VALUE *argv, VALUE self) { CArray *ca; ca_size_t idx; int i; Data_Get_Struct(self, CArray, ca); if ( argc != ca->ndim ) { rb_raise(rb_eArgError, "invalid # of arguments (%i for %i)", argc, ca->ndim); } for (i=0; i<ca->ndim; i++) { idx = NUM2SIZE(argv[i]); /* if ( idx < 0 ) { idx += ca->dim[i]; } */ if ( idx < 0 || idx >= ca->dim[i] ) { return Qfalse; } } return Qtrue; } /* @overload valid_addr? (*addr) (Inquiry) Returns true if the given number is valid as array address for the object */ static VALUE rb_ca_is_valid_addr (VALUE self, VALUE raddr) { CArray *ca; ca_size_t addr; Data_Get_Struct(self, CArray, ca); addr = NUM2SIZE(raddr); /* if ( addr < 0 ) { addr += ca->elements; } */ if ( addr < 0 || addr >= ca->elements ) { return Qfalse; } else { return Qtrue; } } /* @overload has_same_shape? (Inquiry) Returns true if the object has the same shape with the given array. */ static VALUE rb_ca_has_same_shape (VALUE self, VALUE other) { CArray *ca, *cb; Data_Get_Struct(self, CArray, ca); cb = ca_wrap_readonly(other, ca->data_type); return ca_has_same_shape(ca, cb) ? Qtrue : Qfalse; } /* ----------------------------------------------------------------------- */ typedef int (*ca_eql_func)(); #define eql_type(type) \ static int \ eql_## type (type *a, type *b, int bytes) \ { \ return ( *a == *b ); \ } static int eql_VALUE (VALUE *a, VALUE *b, int bytes) { return ( rb_funcall(*a, id_equal, 1, *b) ) ? 1 : 0; } static int eql_data (char *a, char *b, int bytes) { return ( memcmp(a, b, bytes) ) ? 
0 : 1; } eql_type(boolean8_t) eql_type(int8_t) eql_type(uint8_t) eql_type(int16_t) eql_type(uint16_t) eql_type(int32_t) eql_type(uint32_t) eql_type(int64_t) eql_type(uint64_t) eql_type(float32_t) eql_type(float64_t) eql_type(float128_t) eql_type(cmplx64_t) eql_type(cmplx128_t) eql_type(cmplx256_t) ca_eql_func ca_eql[CA_NTYPE] = { eql_data, eql_boolean8_t, eql_int8_t, eql_uint8_t, eql_int16_t, eql_uint16_t, eql_int32_t, eql_uint32_t, eql_int64_t, eql_uint64_t, eql_float32_t, eql_float64_t, eql_float128_t, eql_cmplx64_t, eql_cmplx128_t, eql_cmplx256_t, eql_VALUE, }; int ca_equal (void *ap, void *bp) { CArray *ca = (CArray *) ap; CArray *cb = (CArray *) bp; int flag = 1; int masked_a, masked_b; boolean8_t *ma, *mb; ca_size_t i; ca_size_t bytes; char *pa; char *pb; ca_eql_func eql; if ( ca_is_scalar(ca) ^ ca_is_scalar(cb) ) { /* scalar comparison */ return 0; } if ( ca->data_type != cb->data_type ) { /* data_type comparison */ return 0; } if ( ca->bytes != cb->bytes ) { /* data_type comparison */ return 0; } if ( ca->ndim != cb->ndim ) { /* ndim comparison */ return 0; } if ( ca->elements != cb->elements ) { /* elements comparison */ return 0; } for (i=0; i<ca->ndim; i++) { if ( ca->dim[i] != cb->dim[i] ) { /* dimensional shape comparison */ return 0; } } ca_attach_n(2, ca, cb); /* array contents comparison */ masked_a = ca_is_any_masked(ca); masked_b = ca_is_any_masked(cb); bytes = ca->bytes; pa = ca->ptr; pb = cb->ptr; eql = ca_eql[ca->data_type]; if ( masked_a && masked_b ) { /* masked vs masked */ ma = (boolean8_t*) ca->mask->ptr; mb = (boolean8_t*) cb->mask->ptr; for (i=0; i<ca->elements; i++, ma++, mb++) { if ( *ma != *mb || ( ( ! *ma ) && ( ! eql(pa, pb, bytes) ) ) ) { flag = 0; break; } pa += bytes; pb += bytes; } } else if ( masked_a ) { /* masked vs not-masked */ ma = (boolean8_t*) ca->mask->ptr; for (i=0; i<ca->elements; i++, ma++) { if ( *ma || ( ! eql(pa, pb, bytes) ) ) { flag = 0; break; } pa += bytes; pb += bytes; } } else if ( masked_b ) { /* not-masked vs masked */ mb = (boolean8_t*) cb->mask->ptr; for (i=0; i<ca->elements; i++, mb++) { if ( *mb || ( ! eql(pa, pb, bytes) ) ) { flag = 0; break; } pa += bytes; pb += bytes; } } else { /* not-masked vs not-masked */ for (i=0; i<ca->elements; i++) { if ( ! eql(pa, pb, bytes) ) { flag = 0; break; } pa += bytes; pb += bytes; } } ca_detach_n(2, ca, cb); return flag; } /* @overload == (other) (Inquiry) Returns true if the object equals the given array. */ static VALUE rb_ca_equal (VALUE self, VALUE other) { CArray *ca, *cb; if ( ! rb_obj_is_carray(other) ) { /* check kind_of?(CArray) */ return Qfalse; } if ( rb_ca_has_data_class(self) || rb_ca_has_data_class(other) ) { if ( rb_ca_has_data_class(self) ^ rb_ca_has_data_class(other) ) { return Qfalse; } else { VALUE dc1 = rb_ca_data_class(self); VALUE dc2 = rb_ca_data_class(other); if ( ! rb_funcall(dc1, rb_intern("=="), 1, dc2) ) { return Qfalse; } } } Data_Get_Struct(self, CArray, ca); Data_Get_Struct(other, CArray, cb); return ( ca_equal(ca, cb) ) ? Qtrue : Qfalse; } /* ca_mem_hash() This hash function is modified version of rb_str_hash() in string.c of Ruby 1.8.6 distribution. 
*/ int32_t ca_mem_hash (char *mp, ca_size_t mlen) { register ca_size_t len = mlen; register char *p = mp; register int32_t key = 0; while (len--) { key = key*65599 + *p; p++; } return key; } static int32_t ca_hash (CArray *ca) { int32_t hash; if ( ca_is_any_masked(ca) ) { ca_size_t bytes = ca->bytes; boolean8_t *m = (boolean8_t*) ca->mask->ptr; /* char *tptr = ALLOC_N(char, ca_length(ca)); */ char *tptr = malloc_with_check(ca_length(ca)); char *p; int32_t i; ca_attach(ca); memcpy(tptr, ca->ptr, ca_length(ca)); p = tptr; #ifdef _OPENMP #pragma omp parallel for #endif for (i=0; i<ca->elements; i++) { if ( *(m+i) ) { memset(p+i*bytes, 0, bytes); } } hash = ca_mem_hash(tptr, ca_length(ca)); hash ^= ca_mem_hash(ca->mask->ptr, ca->elements); ca_detach(ca); free(tptr); } else { ca_attach(ca); hash = ca_mem_hash(ca->ptr, ca_length(ca)); ca_detach(ca); } return hash; } /* @overload hash (Inquiry) Returns the hash value of the object. */ VALUE rb_ca_hash (VALUE self) { CArray *ca; int32_t hash; Data_Get_Struct(self, CArray, ca); hash = ca_hash(ca); return ULONG2NUM(hash); } /* ----------------------------------------------------------------------- */ void rb_ca_modify (VALUE self) { if ( OBJ_FROZEN(self) ) { rb_error_frozen("CArray object"); } /* if ( ( ! OBJ_TAINTED(self) ) && rb_safe_level() >= 4 ) { rb_raise(rb_eSecurityError, "Insecure: can't modify carray"); } */ } /* @overload freeze Freeze the object. */ VALUE rb_ca_freeze (VALUE self) { CArray *ca; Data_Get_Struct(self, CArray, ca); ca_set_flag(ca, CA_FLAG_READ_ONLY); return rb_obj_freeze(self); } void Init_carray_test () { id_equal = rb_intern("=="); rb_define_method(rb_cCArray, "valid_index?", rb_ca_is_valid_index, -1); rb_define_method(rb_cCArray, "valid_addr?", rb_ca_is_valid_addr, 1); rb_define_method(rb_cCArray, "same_shape?", rb_ca_has_same_shape, 1); rb_define_method(rb_cCArray, "freeze", rb_ca_freeze, 0); rb_define_method(rb_cCArray, "==", rb_ca_equal, 1); rb_define_alias(rb_cCArray, "eql?", "=="); rb_define_method(rb_cCArray, "hash", rb_ca_hash, 0); rb_define_singleton_method(rb_cCArray, "data_class?", rb_ca_s_is_data_class, 1); }
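/* ca_mem_hash above is the multiply-by-65599 rolling hash borrowed from Ruby's
 * rb_str_hash; a stand-alone sketch of the same recurrence, accumulated in unsigned
 * arithmetic so the sketch stays free of signed overflow. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t mem_hash (const char *p, size_t len)
{
  uint32_t key = 0;
  while ( len-- ) {
    key = key * 65599u + (unsigned char) *p;   /* same recurrence as ca_mem_hash */
    p++;
  }
  return key;
}

int main (void)
{
  const char *s = "carray";
  printf("hash(\"%s\") = %u\n", s, mem_hash(s, strlen(s)));
  return 0;
}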
benchmark.c
/** * @file benchmark.c * @brief benchmark the amount of time saved by parallel program * @note compile with '--std=c99' * @author Lumin <cdluminate@gmail.com> */ #define USE_CUDA #undef USE_CUDA #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <strings.h> #include <omp.h> #include <sys/time.h> // high precision timer, gettimeofday() #include <assert.h> #ifdef USE_CUDA #include "cudabench.h" // cuda benchmarks #endif double * new_vector (size_t len); void fill_vector (double * v, size_t len, double val); void dump_vector (double * v, size_t size); void del_vector (double * v); /** * @brief flag, set 1 to dump all debug information */ int debug = 0; /** * @brief vector length used in L-1 benchmarks */ #define VLEN 1024*1024*16 /** * @brief matrix size used in L-2 benchmarks */ #define MVLEN 1024*4 /** * @brief matrix size used in L-3 benchmarks */ #define MMLEN 256 /** * @brief IMLEN image size, KLEN kernel size, FLEN(IM,K) feature map size */ #define IMLEN 512 #define KLEN 17 #define FLEN(im,k) ((im-k+1)) /** * @brief helper function for tester */ void check_vector_eq (const double * src, const double * dest, size_t n) { for (size_t i = 0; i < n; i++) { if (fabs(src[i] - dest[i]) > 1e-5) { fprintf (stderr, "E: check_vector_eq failure at element %ld\n", i); return; } } } /** * @breif dcopy, L-1 BLAS, serial */ void dcopy_serial (const double * src, double * dest, size_t n) { for (long i = 0; i < n; i++) dest[i] = src[i]; return; } /** * @brief dcopy, L-1 BLAS, parallel */ void dcopy_parallel (const double * src, double * dest, size_t n) { #pragma omp parallel for shared(src, dest) for (long i = 0; i < n; i++) dest[i] = src[i]; return; } /** * @brief tester for dcopy */ void test_dcopy (void (* dcopy)(const double * src, double * dest, size_t n)) { printf("[ .. ] test dcopy@%p\n", dcopy); // short vector double * A = new_vector(128); double * C = new_vector(128); fill_vector(A, 128, 1.); fill_vector(C, 128, 0.); dcopy (A, C, 128); check_vector_eq (A, C, 128); del_vector(A); del_vector(C); // long vector double * B = new_vector(65536); double * D = new_vector(65536); fill_vector(B, 65536, 2.); fill_vector(D, 65536, 0.); dcopy (B, D, 65536); check_vector_eq (B, D, 65536); del_vector (B); del_vector (D); printf("[ OK ] test dcopy@%p\n", dcopy); } /** * @brief dasum, L-1 BLAS, serial */ double dasum_serial (const double * a, size_t n) { double ret = 0.; for (long i = 0; i < n; i++) { ret += (a[i]>0.)?(a[i]):(-a[i]); //if (0 == i % 1000000) printf (" iter %ld, n = %lf\n", i, ret); // debug } return ret; } /** * @brief dasum, L-1 BLAS, parallel */ double dasum_parallel (const double * a, size_t n) { double ret = 0.; #pragma omp parallel for reduction (+:ret) for (long i = 0; i < n; i++) ret += (a[i]>0.)?(a[i]):(-a[i]); return ret; } /** * @brief tester for dasum */ void test_dasum (double (* dasum)(const double * a, size_t n)) { printf("[ .. ] test dasum@%p\n", dasum); // long vector double * A = new_vector(1280); fill_vector(A, 1280, 1.); //dump_vector (A, 128); double ret = dasum (A, 1280); //printf ("%lf\n", ret); assert(fabs(ret - 1280.) < 1e-5); del_vector (A); // short vector // FIXME: BUG: wrong result dasum_cuda when size 128 double * B = new_vector(128); fill_vector(B, 128, 1.0); ret = dasum(B, 128); assert(fabs(ret - 128.) < 1e-5); del_vector (B); // another long vector double * C = new_vector(1200); fill_vector(C, 1200, 1.); //dump_vector (A, 128); ret = dasum (C, 1200); //printf ("%lf\n", ret); assert(fabs(ret - 1200.) 
< 1e-5); del_vector (C); printf("[ OK ] test dasum@%p\n", dasum); } /** * @brief dscal, L-1 BLAS, serial */ void dscal_serial (double * x, const double a, size_t n) { for (size_t i = 0; i < n; i++) x[i] *= a; } /** * @brief dscal, L-1 BLAS, parallel */ void dscal_parallel (double * x, const double a, size_t n) { #pragma omp parallel for shared(x) for (size_t i = 0; i < n; i++) x[i] *= a; } /** * @brief ddot, L-1 BLAS, serial */ double ddot_serial (const double * a, const double * b, size_t n) { double ret = 0.; for (long i = 0; i < n; i++) ret += a[i] * b[i]; return ret; } /** * @brief ddot, L-1 BLAS, parallel */ double ddot_parallel (const double * a, const double * b, size_t n) { double ret = 0.; #pragma omp parallel for reduction (+:ret) for (long i = 0; i < n; i++) ret += a[i] * b[i]; return ret; } /** * @brief daxpby, L-1 BLAS Extension, serial */ void daxpby_serial (const double * x, const double a, double * y, const double b, size_t n) { // a x + b y -> y for (long i = 0; i < n; i++) y[i] += a * x[i] + b * y[i]; } /** * @brief daxpby, L-1 BLAS Extension, parallel */ void daxpby_parallel (const double * x, const double a, double * y, const double b, size_t n) { // a x + b y -> y #pragma omp parallel for shared(x, y) for (long i = 0; i < n; i++) y[i] += a * x[i] + b * y[i]; } /** * @brief dgemv, L-2 BLAS, serial * @f[ a x * b y -> dest @f] */ void dgemv_serial (const double * x, const double a, const double * y, const double b, size_t n, double * dest) { // note, x is matrix ! for (size_t i = 0; i < n; i++) { // for each row of x dest[i] = y[i]; for (size_t j = 0; j < n; j++) { // for each column of y dest[i] += a * *(x+(i*n)+j) * b * y[j]; } } } /** * @brief dgemv, L-2 BLAS, parallel * @f[ a x * b y -> dest @f] */ void dgemv_parallel (const double * x, const double a, const double * y, const double b, size_t n, double * dest) { size_t j = 0; #pragma omp parallel for shared(x, y, dest) private(j) for (size_t i = 0; i < n; i++) { // for each row of x dest[i] = y[i]; for (j = 0; j < n; j++) { // for each column of y dest[i] += a * *(x+(i*n)+j) * b * y[j]; } } } /** * @brief dgemv, L-2 BLAS, parallel version 2 * @f[ a x * b y -> dest @f] */ void dgemv_parallelv2 (const double * x, const double a, const double * y, const double b, size_t n, double * dest) { size_t j = 0; size_t i = 0; dcopy_parallel(y, dest, n); #pragma omp parallel for collapse(2) shared(x, y, dest) private(i,j) for (i = 0; i < n; i++) { // for each row of x for (j = 0; j < n; j++) { // for each column of y dest[i] += a * *(x+(i*n)+j) * b * y[j]; }} } /** * @brief dgemm, L-3 BLAS, serial version * @f[ A_{m x n} * B_{n x k} -> C_{m x k} @f] */ void dgemm_serial (const double * A, const double * B, size_t m, size_t n, size_t k, double * C) { size_t mm = 0, nn = 0, kk = 0; for (mm = 0; mm < m; mm++) { for (kk = 0; kk < k; kk++) { *(C+mm*k+kk) = 0; for (nn = 0; nn < n; nn++) { *(C+mm*k+kk) += *(A+mm*n+nn) * *(B+nn*k+kk); } } } } /** * @brief dgemm, L-3 BLAS, parallel version * @f[ A_{m x n} * B_{n x k} -> C_{m x k} @f] */ void dgemm_parallel (const double * A, const double * B, size_t m, size_t n, size_t k, double * C) { size_t mm = 0, nn = 0, kk = 0; #pragma omp parallel for collapse(2) shared(A, B) private(nn) // Note, dynamic scheduler seems to reduce performance here for (mm = 0; mm < m; mm++) { for (kk = 0; kk < k; kk++) { *(C+mm*k+kk) = 0; for (nn = 0; nn < n; nn++) { *(C+mm*k+kk) += *(A+mm*n+nn) * *(B+nn*k+kk); } }} } /** * @brief dgemm, L-3 BLAS, parallel version 2 * @f[ A_{m x n} * B_{n x k} -> C_{m x k} @f] */ void 
dgemm_parallelv2 (const double * A, const double * B, size_t m, size_t n, size_t k, double * C) { size_t mm = 0, nn = 0, kk = 0; #pragma omp parallel for shared(A, B) private(kk,nn) for (mm = 0; mm < m; mm++) { for (kk = 0; kk < k; kk++) { *(C+mm*k+kk) = 0; for (nn = 0; nn < n; nn++) { *(C+mm*k+kk) += *(A+mm*n+nn) * *(B+nn*k+kk); } } } } /** * @brief 2-D convolution in serial * (Computer Vision Convolution, not Signal Convolution) * @param[in] smap source map * @param[in] dmap destination map * @param[in] m smap size * @param[in] k kernel size * @note no padding */ void conv2_serial (const double * smap, const double * kernel, size_t ssize, size_t ksize, double * dmap) { for (unsigned int i = 0; i < FLEN(ssize,ksize); i++) { // for each row of output map for (unsigned int j = 0; j < FLEN(ssize,ksize); j++) { // for each column of output map // element wise mult, smap part with kernel double sum = 0.; for (unsigned int m = 0; m < ksize; m++) { for (unsigned int n = 0; n < ksize; n++) { sum += kernel[m*ksize +n] * smap[(i+m)*ssize + j+n]; }} // finish (i,j) of output feature map dmap[i*FLEN(ssize,ksize)+j] = sum; }} return; } /** * @brief 2-D convolution in parallel * (Computer Vision Convolution, not Signal Convolution) */ void conv2_parallel (const double * smap, const double * kernel, size_t ssize, size_t ksize, double * dmap) { double sum = 0.; #pragma omp parallel for collapse(2) shared(smap,kernel,dmap) private(sum) for (unsigned int i = 0; i < FLEN(ssize,ksize); i++) { // for each row of output map for (unsigned int j = 0; j < FLEN(ssize,ksize); j++) { // for each column of output map // element wise mult, smap part with kernel sum = 0.; for (unsigned int m = 0; m < ksize; m++) { for (unsigned int n = 0; n < ksize; n++) { sum += kernel[m*ksize +n] * smap[(i+m)*ssize + j+n]; }} // finish (i,j) of output feature map dmap[i*FLEN(ssize,ksize)+j] = sum; }} return; } /** * @brief tell user the time difference in second. * @param tvs the starting time stamp. * @param tve the ending timp stamp. * @see sys/time.h, gettimeofday(2) */ void timediff (struct timeval tvs, struct timeval tve, char * msg) { long diff_sec = tve.tv_sec - tvs.tv_sec; long diff_usec = tve.tv_usec - tvs.tv_usec; double dtime = diff_sec + diff_usec/1e+6; fprintf (stdout, "I: [%s] time cost is %1.6f seconds.\n", (msg==NULL)?"":msg, dtime); } /** * @brief find the time difference in second */ double gettimediff (struct timeval tvs, struct timeval tve) { return ((tve.tv_sec - tvs.tv_sec) + (tve.tv_usec - tvs.tv_usec)/1e+6); } /** * @brief print a spliting line on screen */ void hrulefill (void) { for (int i = 0; i < 80; i++) fprintf (stdout, "-"); fprintf (stdout, "\n"); return; } /** * @brief dump a vector to screen */ void dump_vector (double * v, size_t size) { for (size_t i = 0; i < size; i++) fprintf (stdout, " %.3lf", v[i]); fprintf (stdout, "\n"); return; } /** * @brief dump a matrix to screen */ void dump_matrix (double * m, size_t row, size_t col) { for (size_t i = 0; i < row; i++) { for (size_t j = 0; j < col; j++) fprintf (stdout, " %.3lf", m[i*col+j]); fprintf (stdout, "\n"); } return; } /** * @brief allocate a vector in double * @note values of vector not initialized on allocation. 
*/ double * new_vector (size_t len) { double * ret = (double *)malloc(len*sizeof(double)); assert(ret != NULL); return ret; } /** * @brief delete a vector in double */ void del_vector (double * v) { free(v); } /** * @brief fill a double vector with a value */ void fill_vector (double * v, size_t len, double val) { for (size_t i = 0; i < len; i++) v[i] = val; return; } /** * @brief allocate a double matrix */ double * new_matrix (size_t row, size_t col) { double * ret = (double *)malloc(row*col*sizeof(double)); assert(ret != NULL); return ret; } /** * @brief delete a matrix in double */ void del_matrix (double * m) { free(m); } /** * @brief fill a double matrix with a value */ void fill_matrix (double * m, size_t row, size_t col, double val) { for (size_t i = 0; i < row; i++) for (size_t j = 0; j < col; j++) m[i*col+j] = val; return; } // benchmark for dcopy void benchmark_dcopy (void (* dcopy)(const double * src, double * dest, size_t n)) { struct timeval tvs; struct timeval tve; long sizes[8] ={ 1, 16, 256, 4096, 65536, 1048576, 16777216, 33554432 }; double results[8]={ 0.,0., 0., 0., 0., 0., 0., 0. }; // print table header for (int i = 0; i < 8; i++) printf ("|%8ld", sizes[i]); printf ("|\n"); for (int i = 0; i < 8; i++) { // prepare memory for data double * A = new_vector(sizes[i]); double * C = new_vector(sizes[i]); fill_vector (A, sizes[i], 1.); fill_vector (C, sizes[i], 0.); // calculate gettimeofday (&tvs, NULL); dcopy (A, C, sizes[i]); gettimeofday (&tve, NULL); check_vector_eq (A, C, sizes[i]); // store result results[i] = gettimediff (tvs, tve); del_vector (A); del_vector (C); } // print results for (int i = 0; i < 8; i++) printf ("|%8.6lf", results[i]); printf ("|\n"); } // benchmark for dasum void benchmark_dasum (double (* dasum)(const double * src, size_t n)) { struct timeval tvs; struct timeval tve; long sizes[8] ={ 1, 16, 256, 4096, 65536, 1048576, 16777216, 33554432 }; double results[8]={ 0.,0., 0., 0., 0., 0., 0., 0. }; // print table header for (int i = 0; i < 8; i++) printf ("|%8ld", sizes[i]); printf ("|\n"); for (int i = 0; i < 8; i++) { // prepare memory for data double * A = new_vector(sizes[i]); fill_vector (A, sizes[i], 1.); // calculate gettimeofday (&tvs, NULL); (void) dasum (A, sizes[i]); // discard the summary gettimeofday (&tve, NULL); // store result results[i] = gettimediff (tvs, tve); del_vector (A); } // print results for (int i = 0; i < 8; i++) printf ("|%8.6lf", results[i]); printf ("|\n"); } // benchmark for ddot void benchmark_ddot (double (* ddot)(const double * a, const double * b, size_t n)) { struct timeval tvs; struct timeval tve; long sizes[8] ={ 1, 16, 256, 4096, 65536, 1048576, 16777216, 33554432 }; double results[8]={ 0.,0., 0., 0., 0., 0., 0., 0. 
}; // print table header for (int i = 0; i < 8; i++) printf ("|%8ld", sizes[i]); printf ("|\n"); for (int i = 0; i < 8; i++) { // prepare memory for data double * A = new_vector(sizes[i]); double * B = new_vector(sizes[i]); fill_vector (A, sizes[i], 1.); fill_vector (B, sizes[i], 1.); // calculate gettimeofday (&tvs, NULL); (void) ddot (A, B, sizes[i]); // discard the summary gettimeofday (&tve, NULL); // store result results[i] = gettimediff (tvs, tve); del_vector (A); del_vector (B); } // print results for (int i = 0; i < 8; i++) printf ("|%8.6lf", results[i]); printf ("|\n"); } // benchmark for dscal void benchmark_dscal (void (* dscal)(double * x, const double a, size_t n)) { struct timeval tvs; struct timeval tve; long sizes[8] ={ 1, 16, 256, 4096, 65536, 1048576, 16777216, 33554432 }; double results[8]={ 0.,0., 0., 0., 0., 0., 0., 0. }; // print table header for (int i = 0; i < 8; i++) printf ("|%8ld", sizes[i]); printf ("|\n"); for (int i = 0; i < 8; i++) { // prepare memory for data double * A = new_vector(sizes[i]); fill_vector (A, sizes[i], 1.); // calculate gettimeofday (&tvs, NULL); dscal (A, 0.5, sizes[i]); // discard the summary gettimeofday (&tve, NULL); // store result results[i] = gettimediff (tvs, tve); del_vector (A); } // print results for (int i = 0; i < 8; i++) printf ("|%8.6lf", results[i]); printf ("|\n"); } /** * @brief Lumin's benchmark */ int main (int argc, char ** argv, char ** envp) { fprintf (stdout, "Lumin's serial/parallel/cuda benchmark\nI: initializing ... "); fflush(stdout); struct timeval tvs; // tv_s, for starting point struct timeval tve; // tv_e, for ending point // init times struct timeval tvi; // tv_init struct timeval tvt; // tv_terminate gettimeofday(&tvi, NULL); fprintf(stdout, "[OK]\n"); hrulefill(); { // start unit tests test_dcopy(dcopy_serial); test_dcopy(dcopy_parallel); #ifdef USE_CUDA test_dcopy(dcopy_cuda); #endif // USE_CUDA test_dasum(dasum_serial); test_dasum(dasum_parallel); #ifdef USE_CUDA test_dasum(dasum_cuda); #endif } // end unit tests hrulefill(); { // copy test printf ("I: [dcopy_serial] test series\n"); benchmark_dcopy (dcopy_serial); printf ("I: [dcopy_parallel] test series\n"); benchmark_dcopy (dcopy_parallel); #ifdef USE_CUDA printf ("I: [dcopy_cuda] test series\n"); benchmark_dcopy (dcopy_cuda); #endif // USE_CUDA } hrulefill(); { // asum test printf ("I: [dasum_serial] test series\n"); benchmark_dasum (dasum_serial); printf ("I: [dasum_parallel] test series\n"); benchmark_dasum (dasum_parallel); #ifdef USE_CUDA printf ("I: [dasum_cuda] test series\n"); benchmark_dasum (dasum_cuda); #endif // USE_CUDA } hrulefill(); { // dot test // FIXME: add unit tests for ddot // run benchmarks printf ("I: [ddot_serial] test series\n"); benchmark_ddot (ddot_serial); printf ("I: [ddot_parallel] test series\n"); benchmark_ddot (ddot_parallel); } hrulefill(); { // scal test // FIXME: add unit tests // run benchmarks printf ("I: [dscal_serial] test series\n"); benchmark_dscal (dscal_serial); printf ("I: [dscal_parallel] test series\n"); benchmark_dscal (dscal_parallel); #ifdef USE_CUDA printf ("I: [dscal_cuda] test series\n"); benchmark_dscal (dscal_cuda); #endif // USE_CUDA } hrulefill(); { // axpby test // data double * A = new_vector(VLEN); double * C = new_vector(VLEN); fill_vector(A, VLEN, 1.); fill_vector(C, VLEN, 1.); // serial gettimeofday(&tvs, NULL); daxpby_serial (A, 0.5, C, 0.5, VLEN); gettimeofday(&tve, NULL); timediff (tvs, tve, "daxpby in serial"); if (debug) dump_vector(A, VLEN); if (debug) dump_vector(C, VLEN); // 
parallel gettimeofday(&tvs, NULL); daxpby_parallel (A, 0.5, C, 0.5, VLEN); gettimeofday(&tve, NULL); timediff (tvs, tve, "daxpby in parallel"); if (debug) dump_vector(A, VLEN); if (debug) dump_vector(C, VLEN); // post-test del_vector(A); del_vector(C); } hrulefill(); { // gemv test // data double * M = new_matrix(MVLEN, MVLEN); double * A = new_vector(MVLEN); double * Y = new_vector(MVLEN); fill_matrix(M, MVLEN, MVLEN, 1.); fill_vector(A, MVLEN, 1.); fill_vector(Y, MVLEN, 1.); if (debug) dump_matrix(M, MVLEN, MVLEN); if (debug) dump_vector(A, MVLEN); // serial gettimeofday(&tvs, NULL); dgemv_serial (M, 1., A, 1., MVLEN, Y); gettimeofday(&tve, NULL); timediff (tvs, tve, "dgemv in serial"); if (debug) dump_vector(Y, MVLEN); // parallel gettimeofday(&tvs, NULL); dgemv_parallel (M, 1., A, 1., MVLEN, Y); gettimeofday(&tve, NULL); timediff (tvs, tve, "dgemv in parallel"); if (debug) dump_vector(Y, MVLEN); // parallelv2 gettimeofday(&tvs, NULL); dgemv_parallelv2 (M, 1., A, 1., MVLEN, Y); gettimeofday(&tve, NULL); timediff (tvs, tve, "dgemv in parallelv2"); if (debug) dump_vector(Y, MVLEN); // post-test del_matrix(M); del_vector(A); del_vector(Y); } hrulefill(); { // gemm // data double * X = new_matrix(MMLEN, MMLEN); double * Y = new_matrix(MMLEN, MMLEN); double * Z = new_matrix(MMLEN, MMLEN); fill_matrix(X, MMLEN, MMLEN, 1.); fill_matrix(Y, MMLEN, MMLEN, 1.); fill_matrix(Z, MMLEN, MMLEN, 0.); if (debug) dump_matrix(X, MMLEN, MMLEN); if (debug) dump_matrix(Y, MMLEN, MMLEN); // serial gettimeofday(&tvs, NULL); dgemm_serial (X, Y, MMLEN, MMLEN, MMLEN, Z); gettimeofday(&tve, NULL); timediff (tvs, tve, "dgemm in serial"); if (debug) dump_matrix(Z, MMLEN, MMLEN); // parallel gettimeofday(&tvs, NULL); dgemm_parallel (X, Y, MMLEN, MMLEN, MMLEN, Z); gettimeofday(&tve, NULL); timediff (tvs, tve, "dgemm in parallel"); if (debug) dump_matrix(Z, MMLEN, MMLEN); // parallel v2 gettimeofday(&tvs, NULL); dgemm_parallelv2 (X, Y, MMLEN, MMLEN, MMLEN, Z); gettimeofday(&tve, NULL); timediff (tvs, tve, "dgemm in parallelv2"); if (debug) dump_matrix(Z, MMLEN, MMLEN); // post-test del_matrix(X); del_matrix(Y); del_matrix(Z); } hrulefill(); { // convolution // data double * image = new_matrix(IMLEN, IMLEN); double * kernel = new_matrix(KLEN, KLEN); double * fmap = new_matrix(FLEN(IMLEN,KLEN), FLEN(IMLEN,KLEN)); fill_matrix(image, IMLEN, IMLEN, 1.); fill_matrix(kernel, KLEN, KLEN, 1.); fill_matrix(fmap, FLEN(IMLEN,KLEN), FLEN(IMLEN,KLEN), 0.); if (debug) dump_matrix(image, IMLEN, IMLEN); if (debug) dump_matrix(kernel, KLEN, KLEN); // serial gettimeofday(&tvs, NULL); conv2_serial (image, kernel, IMLEN, KLEN, fmap); gettimeofday(&tve, NULL); timediff (tvs, tve, "conv2 in serial"); if (debug) dump_matrix(fmap, FLEN(IMLEN,KLEN), FLEN(IMLEN,KLEN)); // parallel gettimeofday(&tvs, NULL); conv2_parallel (image, kernel, IMLEN, KLEN, fmap); gettimeofday(&tve, NULL); timediff (tvs, tve, "conv2 in parallel"); if (debug) dump_matrix(fmap, FLEN(IMLEN,KLEN), FLEN(IMLEN,KLEN)); // post-test del_matrix(image); del_matrix(kernel); del_matrix(fmap); } hrulefill(); // how long all the benchmarks take gettimeofday(&tvt, NULL); timediff(tvi, tvt, "All benchmark"); return 0; }
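/* benchmark.c marks the ddot unit test as a FIXME; this is a minimal stand-alone
 * sketch of such a test against the same signature
 * double ddot(const double *a, const double *b, size_t n). The reference
 * implementation, the vector size, and the tolerance are illustrative choices. */
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

static double ddot_ref (const double * a, const double * b, size_t n)
{
  double ret = 0.;
  for (size_t i = 0; i < n; i++) ret += a[i] * b[i];
  return ret;
}

static void test_ddot (double (* ddot)(const double * a, const double * b, size_t n))
{
  printf("[ .. ] test ddot@%p\n", (void *) ddot);
  size_t n = 4096;
  double * a = malloc(n * sizeof(double));
  double * b = malloc(n * sizeof(double));
  assert(a != NULL && b != NULL);
  for (size_t i = 0; i < n; i++) { a[i] = 1.; b[i] = 2.; }
  assert(fabs(ddot(a, b, n) - 2. * (double) n) < 1e-5);
  free(a); free(b);
  printf("[ OK ] test ddot@%p\n", (void *) ddot);
}

int main (void) { test_ddot(ddot_ref); return 0; }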
paint.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP AAA IIIII N N TTTTT % % P P A A I NN N T % % PPPP AAAAA I N N N T % % P A A I N NN T % % P A A IIIII N N T % % % % % % Methods to Paint on an Image % % % % Software Design % % John Cristy % % July 1998 % % % % % % Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/draw.h" #include "magick/draw-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/paint.h" #include "magick/pixel-private.h" #include "magick/string_.h" #include "magick/thread-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l o o d f i l l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FloodfillPaintImage() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % By default target must match a particular pixel color exactly. % However, in many cases two colors may differ by a small amount. The % fuzz member of image defines how much tolerance is acceptable to % consider two colors as the same. For example, set fuzz to 10 and the % color red at intensities of 100 and 102 respectively are now % interpreted as the same color for the purposes of the floodfill. % % The format of the FloodfillPaintImage method is: % % MagickBooleanType FloodfillPaintImage(Image *image, % const ChannelType channel,const DrawInfo *draw_info, % const MagickPixelPacket target,const ssize_t x_offset, % const ssize_t y_offset,const MagickBooleanType invert) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel(s). % % o draw_info: the draw info. % % o target: the RGB value of the target color. % % o x_offset,y_offset: the starting location of the operation. % % o invert: paint any pixel that does not match the target color. 
% */ MagickExport MagickBooleanType FloodfillPaintImage(Image *image, const ChannelType channel,const DrawInfo *draw_info, const MagickPixelPacket *target,const ssize_t x_offset,const ssize_t y_offset, const MagickBooleanType invert) { #define MaxStacksize (1UL << 15) #define PushSegmentStack(up,left,right,delta) \ { \ if (s >= (segment_stack+MaxStacksize)) \ ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \ else \ { \ if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \ { \ s->x1=(double) (left); \ s->y1=(double) (up); \ s->x2=(double) (right); \ s->y2=(double) (delta); \ s++; \ } \ } \ } CacheView *floodplane_view, *image_view; ExceptionInfo *exception; Image *floodplane_image; MagickBooleanType skip; MagickPixelPacket fill, pixel; PixelPacket fill_color; register SegmentInfo *s; SegmentInfo *segment_stack; ssize_t offset, start, x, x1, x2, y; /* Check boundary conditions. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickSignature); if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns)) return(MagickFalse); if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows)) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); /* Set floodfill state. */ floodplane_image=CloneImage(image,0,0,MagickTrue,&image->exception); if (floodplane_image == (Image *) NULL) return(MagickFalse); (void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel); segment_stack=(SegmentInfo *) AcquireQuantumMemory(MaxStacksize, sizeof(*segment_stack)); if (segment_stack == (SegmentInfo *) NULL) { floodplane_image=DestroyImage(floodplane_image); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } /* Push initial segment on stack. */ exception=(&image->exception); x=x_offset; y=y_offset; start=0; s=segment_stack; PushSegmentStack(y,x,x,1); PushSegmentStack(y+1,x,x,-1); GetMagickPixelPacket(image,&fill); GetMagickPixelPacket(image,&pixel); image_view=AcquireCacheView(image); floodplane_view=AcquireCacheView(floodplane_image); while (s > segment_stack) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; register PixelPacket *restrict q; /* Pop segment off stack. */ s--; x1=(ssize_t) s->x1; x2=(ssize_t) s->x2; offset=(ssize_t) s->y2; y=(ssize_t) s->y1+offset; /* Recolor neighboring pixels. */ p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception); q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; indexes=GetCacheViewVirtualIndexQueue(image_view); p+=x1; q+=x1; for (x=x1; x >= 0; x--) { if (q->opacity == (Quantum) TransparentOpacity) break; SetMagickPixelPacket(image,p,indexes+x,&pixel); if (IsMagickColorSimilar(&pixel,target) == invert) break; q->opacity=(Quantum) TransparentOpacity; p--; q--; } if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse) break; skip=x >= x1 ? 
MagickTrue : MagickFalse; if (skip == MagickFalse) { start=x+1; if (start < x1) PushSegmentStack(y,start,x1-1,-offset); x=x1+1; } do { if (skip == MagickFalse) { if (x < (ssize_t) image->columns) { p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1, exception); q=GetCacheViewAuthenticPixels(floodplane_view,x,y, image->columns-x,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; indexes=GetCacheViewVirtualIndexQueue(image_view); for ( ; x < (ssize_t) image->columns; x++) { if (q->opacity == (Quantum) TransparentOpacity) break; SetMagickPixelPacket(image,p,indexes+x,&pixel); if (IsMagickColorSimilar(&pixel,target) == invert) break; q->opacity=(Quantum) TransparentOpacity; p++; q++; } if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse) break; } PushSegmentStack(y,start,x-1,offset); if (x > (x2+1)) PushSegmentStack(y,x2+1,x-1,-offset); } skip=MagickFalse; x++; if (x <= x2) { p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1, exception); q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; indexes=GetCacheViewVirtualIndexQueue(image_view); for ( ; x <= x2; x++) { if (q->opacity == (Quantum) TransparentOpacity) break; SetMagickPixelPacket(image,p,indexes+x,&pixel); if (IsMagickColorSimilar(&pixel,target) != invert) break; p++; q++; } } start=x; } while (x <= x2); } for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *restrict p; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; /* Tile fill color onto floodplane. */ p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1, exception); q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelOpacity(p) != OpaqueOpacity) { (void) GetFillColor(draw_info,x,y,&fill_color); SetMagickPixelPacket(image,&fill_color,(IndexPacket *) NULL,&fill); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&fill); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(fill.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(fill.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(fill.blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(fill.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum(fill.index)); } p++; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) break; } floodplane_view=DestroyCacheView(floodplane_view); image_view=DestroyCacheView(image_view); segment_stack=(SegmentInfo *) RelinquishMagickMemory(segment_stack); floodplane_image=DestroyImage(floodplane_image); return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G r a d i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GradientImage() applies a continuously smooth color transitions along a % vector from one color to another. % % Note, the interface of this method will change in the future to support % more than one transistion. 
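%
%  A minimal usage sketch (the colors and the spread method are arbitrary
%  assumptions; error handling is omitted):
%
%    PixelPacket start_color,stop_color;
%
%    (void) QueryColorDatabase("white",&start_color,&image->exception);
%    (void) QueryColorDatabase("blue",&stop_color,&image->exception);
%    (void) GradientImage(image,LinearGradient,PadSpread,&start_color,
%      &stop_color);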
% % The format of the GradientImage method is: % % MagickBooleanType GradientImage(Image *image,const GradientType type, % const SpreadMethod method,const PixelPacket *start_color, % const PixelPacket *stop_color) % % A description of each parameter follows: % % o image: the image. % % o type: the gradient type: linear or radial. % % o spread: the gradient spread meathod: pad, reflect, or repeat. % % o start_color: the start color. % % o stop_color: the stop color. % % This provides a good example of making use of the DrawGradientImage % function and the gradient structure in draw_info. */ static inline double MagickMax(const double x,const double y) { return(x > y ? x : y); } MagickExport MagickBooleanType GradientImage(Image *image, const GradientType type,const SpreadMethod method, const PixelPacket *start_color,const PixelPacket *stop_color) { DrawInfo *draw_info; GradientInfo *gradient; MagickBooleanType status; register ssize_t i; /* Set gradient start-stop end points. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(start_color != (const PixelPacket *) NULL); assert(stop_color != (const PixelPacket *) NULL); draw_info=AcquireDrawInfo(); gradient=(&draw_info->gradient); gradient->type=type; gradient->bounding_box.width=image->columns; gradient->bounding_box.height=image->rows; gradient->gradient_vector.x2=(double) image->columns-1.0; gradient->gradient_vector.y2=(double) image->rows-1.0; if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0)) gradient->gradient_vector.x2=0.0; gradient->center.x=(double) gradient->gradient_vector.x2/2.0; gradient->center.y=(double) gradient->gradient_vector.y2/2.0; gradient->radius=MagickMax(gradient->center.x,gradient->center.y); gradient->spread=method; /* Define the gradient to fill between the stops. */ gradient->number_stops=2; gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops, sizeof(*gradient->stops)); if (gradient->stops == (StopInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) ResetMagickMemory(gradient->stops,0,gradient->number_stops* sizeof(*gradient->stops)); for (i=0; i < (ssize_t) gradient->number_stops; i++) GetMagickPixelPacket(image,&gradient->stops[i].color); SetMagickPixelPacket(image,start_color,(IndexPacket *) NULL, &gradient->stops[0].color); gradient->stops[0].offset=0.0; SetMagickPixelPacket(image,stop_color,(IndexPacket *) NULL, &gradient->stops[1].color); gradient->stops[1].offset=1.0; /* Draw a gradient on the image. */ status=DrawGradientImage(image,draw_info); draw_info=DestroyDrawInfo(draw_info); if ((start_color->opacity == OpaqueOpacity) && (stop_color->opacity == OpaqueOpacity)) image->matte=MagickFalse; if ((IsGrayPixel(start_color) != MagickFalse) && (IsGrayPixel(stop_color) != MagickFalse)) image->type=GrayscaleType; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O i l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OilPaintImage() applies a special effect filter that simulates an oil % painting. Each pixel is replaced by the most frequent color occurring % in a circular region defined by radius. 
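%
%  A minimal usage sketch (the radius of 3.0 is an arbitrary assumption;
%  the painted image is destroyed once it is no longer needed):
%
%    Image *paint_image;
%
%    paint_image=OilPaintImage(image,3.0,&image->exception);
%    if (paint_image != (Image *) NULL)
%      paint_image=DestroyImage(paint_image);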
% % The format of the OilPaintImage method is: % % Image *OilPaintImage(const Image *image,const double radius, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the circular neighborhood. % % o exception: return any errors or warnings in this structure. % */ static size_t **DestroyHistogramThreadSet(size_t **histogram) { register ssize_t i; assert(histogram != (size_t **) NULL); for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++) if (histogram[i] != (size_t *) NULL) histogram[i]=(size_t *) RelinquishMagickMemory(histogram[i]); histogram=(size_t **) RelinquishMagickMemory(histogram); return(histogram); } static size_t **AcquireHistogramThreadSet(const size_t count) { register ssize_t i; size_t **histogram, number_threads; number_threads=GetOpenMPMaximumThreads(); histogram=(size_t **) AcquireQuantumMemory(number_threads, sizeof(*histogram)); if (histogram == (size_t **) NULL) return((size_t **) NULL); (void) ResetMagickMemory(histogram,0,number_threads*sizeof(*histogram)); for (i=0; i < (ssize_t) number_threads; i++) { histogram[i]=(size_t *) AcquireQuantumMemory(count, sizeof(**histogram)); if (histogram[i] == (size_t *) NULL) return(DestroyHistogramThreadSet(histogram)); } return(histogram); } MagickExport Image *OilPaintImage(const Image *image,const double radius, ExceptionInfo *exception) { #define NumberPaintBins 256 #define OilPaintImageTag "OilPaint/Image" CacheView *image_view, *paint_view; Image *paint_image; MagickBooleanType status; MagickOffsetType progress; size_t **restrict histograms, width; ssize_t y; /* Initialize painted image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); width=GetOptimalKernelWidth2D(radius,0.5); paint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (paint_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(paint_image,DirectClass) == MagickFalse) { InheritException(exception,&paint_image->exception); paint_image=DestroyImage(paint_image); return((Image *) NULL); } histograms=AcquireHistogramThreadSet(NumberPaintBins); if (histograms == (size_t **) NULL) { paint_image=DestroyImage(paint_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Oil paint image. 
*/ status=MagickTrue; progress=0; image_view=AcquireCacheView(image); paint_view=AcquireCacheView(paint_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register IndexPacket *restrict paint_indexes; register ssize_t x; register PixelPacket *restrict q; register size_t *histogram; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (width/2L),image->columns+width,width,exception); q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); paint_indexes=GetCacheViewAuthenticIndexQueue(paint_view); histogram=histograms[GetOpenMPThreadId()]; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i, u; size_t count; ssize_t j, k, v; /* Assign most frequent color. */ i=0; j=0; count=0; (void) ResetMagickMemory(histogram,0,NumberPaintBins*sizeof(*histogram)); for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { k=(ssize_t) ScaleQuantumToChar(PixelIntensityToQuantum(p+u+i)); histogram[k]++; if (histogram[k] > count) { j=i+u; count=histogram[k]; } } i+=(ssize_t) (image->columns+width); } *q=(*(p+j)); if (image->colorspace == CMYKColorspace) SetPixelIndex(paint_indexes+x,GetPixelIndex( indexes+x+j)); p++; q++; } if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_OilPaintImage) #endif proceed=SetImageProgress(image,OilPaintImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } paint_view=DestroyCacheView(paint_view); image_view=DestroyCacheView(image_view); histograms=DestroyHistogramThreadSet(histograms); if (status == MagickFalse) paint_image=DestroyImage(paint_image); return(paint_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O p a q u e P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpaquePaintImage() changes any pixel that matches color with the color % defined by fill. % % By default color must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. Fuzz defines % how much tolerance is acceptable to consider two colors as the same. % For example, set fuzz to 10 and the color red at intensities of 100 and % 102 respectively are now interpreted as the same color. % % The format of the OpaquePaintImage method is: % % MagickBooleanType OpaquePaintImage(Image *image, % const PixelPacket *target,const PixelPacket *fill, % const MagickBooleanType invert) % MagickBooleanType OpaquePaintImageChannel(Image *image, % const ChannelType channel,const PixelPacket *target, % const PixelPacket *fill,const MagickBooleanType invert) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel(s). % % o target: the RGB value of the target color. % % o fill: the replacement color. % % o invert: paint any pixel that does not match the target color. 
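%
%  A minimal usage sketch that repaints red pixels blue (the colors are
%  arbitrary assumptions; note that the implementation takes
%  MagickPixelPacket pointers for target and fill):
%
%    MagickPixelPacket fill,target;
%
%    GetMagickPixelPacket(image,&target);
%    target.red=(MagickRealType) QuantumRange;
%    target.green=0.0;
%    target.blue=0.0;
%    GetMagickPixelPacket(image,&fill);
%    fill.red=0.0;
%    fill.green=0.0;
%    fill.blue=(MagickRealType) QuantumRange;
%    (void) OpaquePaintImage(image,&target,&fill,MagickFalse);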
% */ MagickExport MagickBooleanType OpaquePaintImage(Image *image, const MagickPixelPacket *target,const MagickPixelPacket *fill, const MagickBooleanType invert) { return(OpaquePaintImageChannel(image,CompositeChannels,target,fill,invert)); } MagickExport MagickBooleanType OpaquePaintImageChannel(Image *image, const ChannelType channel,const MagickPixelPacket *target, const MagickPixelPacket *fill,const MagickBooleanType invert) { #define OpaquePaintImageTag "Opaque/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(target != (MagickPixelPacket *) NULL); assert(fill != (MagickPixelPacket *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); /* Make image color opaque. */ status=MagickTrue; progress=0; exception=(&image->exception); GetMagickPixelPacket(image,&zero); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickPixelPacket pixel; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); if (IsMagickColorSimilar(&pixel,target) != invert) { if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(fill->red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(fill->green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(fill->blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(fill->opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum(fill->index)); } q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_OpaquePaintImageChannel) #endif proceed=SetImageProgress(image,OpaquePaintImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s p a r e n t P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransparentPaintImage() changes the opacity value associated with any pixel % that matches color to the value defined by opacity. % % By default color must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. Fuzz defines % how much tolerance is acceptable to consider two colors as the same. % For example, set fuzz to 10 and the color red at intensities of 100 and % 102 respectively are now interpreted as the same color. 
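%
%  A minimal usage sketch that makes white pixels fully transparent (the
%  target color is an arbitrary assumption; error handling is omitted):
%
%    MagickPixelPacket target;
%
%    GetMagickPixelPacket(image,&target);
%    target.red=(MagickRealType) QuantumRange;
%    target.green=(MagickRealType) QuantumRange;
%    target.blue=(MagickRealType) QuantumRange;
%    (void) TransparentPaintImage(image,&target,(Quantum) TransparentOpacity,
%      MagickFalse);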
% % The format of the TransparentPaintImage method is: % % MagickBooleanType TransparentPaintImage(Image *image, % const MagickPixelPacket *target,const Quantum opacity, % const MagickBooleanType invert) % % A description of each parameter follows: % % o image: the image. % % o target: the target color. % % o opacity: the replacement opacity value. % % o invert: paint any pixel that does not match the target color. % */ MagickExport MagickBooleanType TransparentPaintImage(Image *image, const MagickPixelPacket *target,const Quantum opacity, const MagickBooleanType invert) { #define TransparentPaintImageTag "Transparent/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(target != (MagickPixelPacket *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); /* Make image color transparent. */ status=MagickTrue; progress=0; exception=(&image->exception); GetMagickPixelPacket(image,&zero); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickPixelPacket pixel; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); if (IsMagickColorSimilar(&pixel,target) != invert) q->opacity=opacity; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransparentPaintImage) #endif proceed=SetImageProgress(image,TransparentPaintImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s p a r e n t P a i n t I m a g e C h r o m a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransparentPaintImageChroma() changes the opacity value associated with any % pixel that matches color to the value defined by opacity. % % As there is one fuzz value for the all the channels, the % TransparentPaintImage() API is not suitable for the operations like chroma, % where the tolerance for similarity of two color component (RGB) can be % different, Thus we define this method take two target pixels (one % low and one hight) and all the pixels of an image which are lying between % these two pixels are made transparent. 
% % The format of the TransparentPaintImage method is: % % MagickBooleanType TransparentPaintImage(Image *image, % const MagickPixelPacket *low,const MagickPixelPacket *hight, % const Quantum opacity,const MagickBooleanType invert) % % A description of each parameter follows: % % o image: the image. % % o low: the low target color. % % o high: the high target color. % % o opacity: the replacement opacity value. % % o invert: paint any pixel that does not match the target color. % */ MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image, const MagickPixelPacket *low,const MagickPixelPacket *high, const Quantum opacity,const MagickBooleanType invert) { #define TransparentPaintImageTag "Transparent/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(high != (MagickPixelPacket *) NULL); assert(low != (MagickPixelPacket *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,ResetAlphaChannel); /* Make image color transparent. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType match; MagickPixelPacket pixel; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); GetMagickPixelPacket(image,&pixel); for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); match=((pixel.red >= low->red) && (pixel.red <= high->red) && (pixel.green >= low->green) && (pixel.green <= high->green) && (pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ? MagickTrue : MagickFalse; if (match != invert) q->opacity=opacity; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransparentPaintImageChroma) #endif proceed=SetImageProgress(image,TransparentPaintImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); }
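/*
  Illustrative sketch only, not part of the library API above: one way a
  caller might use TransparentPaintImageChroma() to key out a green-screen
  background.  The function name and the low/high channel bounds are
  hypothetical assumptions, and error handling is left to the caller.
*/
static MagickBooleanType ChromaKeyGreenScreen(Image *image)
{
  MagickPixelPacket
    high,
    low;

  /*
    Match any pixel whose green channel dominates: green at least half of
    QuantumRange, red and blue at most half of QuantumRange.
  */
  GetMagickPixelPacket(image,&low);
  GetMagickPixelPacket(image,&high);
  low.red=0.0;
  low.green=0.5*QuantumRange;
  low.blue=0.0;
  high.red=0.5*QuantumRange;
  high.green=(MagickRealType) QuantumRange;
  high.blue=0.5*QuantumRange;
  /*
    Every pixel lying channel-wise between low and high becomes transparent.
  */
  return(TransparentPaintImageChroma(image,&low,&high,(Quantum)
    TransparentOpacity,MagickFalse));
}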
gimple.h
/* Gimple IR definitions. Copyright (C) 2007-2013 Free Software Foundation, Inc. Contributed by Aldy Hernandez <aldyh@redhat.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #ifndef GCC_GIMPLE_H #define GCC_GIMPLE_H #include "pointer-set.h" #include "vec.h" #include "ggc.h" #include "basic-block.h" #include "tree.h" #include "tree-ssa-operands.h" #include "tree-ssa-alias.h" #include "internal-fn.h" typedef gimple gimple_seq_node; /* For each block, the PHI nodes that need to be rewritten are stored into these vectors. */ typedef vec<gimple> gimple_vec; enum gimple_code { #define DEFGSCODE(SYM, STRING, STRUCT) SYM, #include "gimple.def" #undef DEFGSCODE LAST_AND_UNUSED_GIMPLE_CODE }; extern const char *const gimple_code_name[]; extern const unsigned char gimple_rhs_class_table[]; /* Error out if a gimple tuple is addressed incorrectly. */ #if defined ENABLE_GIMPLE_CHECKING #define gcc_gimple_checking_assert(EXPR) gcc_assert (EXPR) extern void gimple_check_failed (const_gimple, const char *, int, \ const char *, enum gimple_code, \ enum tree_code) ATTRIBUTE_NORETURN; #define GIMPLE_CHECK(GS, CODE) \ do { \ const_gimple __gs = (GS); \ if (gimple_code (__gs) != (CODE)) \ gimple_check_failed (__gs, __FILE__, __LINE__, __FUNCTION__, \ (CODE), ERROR_MARK); \ } while (0) #else /* not ENABLE_GIMPLE_CHECKING */ #define gcc_gimple_checking_assert(EXPR) ((void)(0 && (EXPR))) #define GIMPLE_CHECK(GS, CODE) (void)0 #endif /* Class of GIMPLE expressions suitable for the RHS of assignments. See get_gimple_rhs_class. */ enum gimple_rhs_class { GIMPLE_INVALID_RHS, /* The expression cannot be used on the RHS. */ GIMPLE_TERNARY_RHS, /* The expression is a ternary operation. */ GIMPLE_BINARY_RHS, /* The expression is a binary operation. */ GIMPLE_UNARY_RHS, /* The expression is a unary operation. */ GIMPLE_SINGLE_RHS /* The expression is a single object (an SSA name, a _DECL, a _REF, etc. */ }; /* Specific flags for individual GIMPLE statements. These flags are always stored in gimple_statement_base.subcode and they may only be defined for statement codes that do not use sub-codes. Values for the masks can overlap as long as the overlapping values are never used in the same statement class. The maximum mask value that can be defined is 1 << 15 (i.e., each statement code can hold up to 16 bitflags). Keep this list sorted. */ enum gf_mask { GF_ASM_INPUT = 1 << 0, GF_ASM_VOLATILE = 1 << 1, GF_CALL_FROM_THUNK = 1 << 0, GF_CALL_RETURN_SLOT_OPT = 1 << 1, GF_CALL_TAILCALL = 1 << 2, GF_CALL_VA_ARG_PACK = 1 << 3, GF_CALL_NOTHROW = 1 << 4, GF_CALL_ALLOCA_FOR_VAR = 1 << 5, GF_CALL_INTERNAL = 1 << 6, GF_OMP_PARALLEL_COMBINED = 1 << 0, /* True on an GIMPLE_OMP_RETURN statement if the return does not require a thread synchronization via some sort of barrier. The exact barrier that would otherwise be emitted is dependent on the OMP statement with which this return is associated. 
*/ GF_OMP_RETURN_NOWAIT = 1 << 0, GF_OMP_SECTION_LAST = 1 << 0, GF_OMP_ATOMIC_NEED_VALUE = 1 << 0, GF_PREDICT_TAKEN = 1 << 15 }; /* Currently, there are only two types of gimple debug stmt. Others are envisioned, for example, to enable the generation of is_stmt notes in line number information, to mark sequence points, etc. This subcode is to be used to tell them apart. */ enum gimple_debug_subcode { GIMPLE_DEBUG_BIND = 0, GIMPLE_DEBUG_SOURCE_BIND = 1 }; /* Masks for selecting a pass local flag (PLF) to work on. These masks are used by gimple_set_plf and gimple_plf. */ enum plf_mask { GF_PLF_1 = 1 << 0, GF_PLF_2 = 1 << 1 }; /* Iterator object for GIMPLE statement sequences. */ typedef struct { /* Sequence node holding the current statement. */ gimple_seq_node ptr; /* Sequence and basic block holding the statement. These fields are necessary to handle edge cases such as when statement is added to an empty basic block or when the last statement of a block/sequence is removed. */ gimple_seq *seq; basic_block bb; } gimple_stmt_iterator; /* Data structure definitions for GIMPLE tuples. NOTE: word markers are for 64 bit hosts. */ struct GTY((chain_next ("%h.next"))) gimple_statement_base { /* [ WORD 1 ] Main identifying code for a tuple. */ ENUM_BITFIELD(gimple_code) code : 8; /* Nonzero if a warning should not be emitted on this tuple. */ unsigned int no_warning : 1; /* Nonzero if this tuple has been visited. Passes are responsible for clearing this bit before using it. */ unsigned int visited : 1; /* Nonzero if this tuple represents a non-temporal move. */ unsigned int nontemporal_move : 1; /* Pass local flags. These flags are free for any pass to use as they see fit. Passes should not assume that these flags contain any useful value when the pass starts. Any initial state that the pass requires should be set on entry to the pass. See gimple_set_plf and gimple_plf for usage. */ unsigned int plf : 2; /* Nonzero if this statement has been modified and needs to have its operands rescanned. */ unsigned modified : 1; /* Nonzero if this statement contains volatile operands. */ unsigned has_volatile_ops : 1; /* The SUBCODE field can be used for tuple-specific flags for tuples that do not require subcodes. Note that SUBCODE should be at least as wide as tree codes, as several tuples store tree codes in there. */ unsigned int subcode : 16; /* UID of this statement. This is used by passes that want to assign IDs to statements. It must be assigned and used by each pass. By default it should be assumed to contain garbage. */ unsigned uid; /* [ WORD 2 ] Locus information for debug info. */ location_t location; /* Number of operands in this tuple. */ unsigned num_ops; /* [ WORD 3 ] Basic block holding this statement. */ basic_block bb; /* [ WORD 4-5 ] Linked lists of gimple statements. The next pointers form a NULL terminated list, the prev pointers are a cyclic list. A gimple statement is hence also a double-ended list of statements, with the pointer itself being the first element, and the prev pointer being the last. */ gimple next; gimple GTY((skip)) prev; }; /* Base structure for tuples with operands. */ struct GTY(()) gimple_statement_with_ops_base { /* [ WORD 1-6 ] */ struct gimple_statement_base gsbase; /* [ WORD 7 ] SSA operand vectors. NOTE: It should be possible to amalgamate these vectors with the operand vector OP. However, the SSA operand vectors are organized differently and contain more information (like immediate use chaining). 
*/ struct use_optype_d GTY((skip (""))) *use_ops; }; /* Statements that take register operands. */ struct GTY(()) gimple_statement_with_ops { /* [ WORD 1-7 ] */ struct gimple_statement_with_ops_base opbase; /* [ WORD 8 ] Operand vector. NOTE! This must always be the last field of this structure. In particular, this means that this structure cannot be embedded inside another one. */ tree GTY((length ("%h.opbase.gsbase.num_ops"))) op[1]; }; /* Base for statements that take both memory and register operands. */ struct GTY(()) gimple_statement_with_memory_ops_base { /* [ WORD 1-7 ] */ struct gimple_statement_with_ops_base opbase; /* [ WORD 8-9 ] Virtual operands for this statement. The GC will pick them up via the ssa_names array. */ tree GTY((skip (""))) vdef; tree GTY((skip (""))) vuse; }; /* Statements that take both memory and register operands. */ struct GTY(()) gimple_statement_with_memory_ops { /* [ WORD 1-9 ] */ struct gimple_statement_with_memory_ops_base membase; /* [ WORD 10 ] Operand vector. NOTE! This must always be the last field of this structure. In particular, this means that this structure cannot be embedded inside another one. */ tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1]; }; /* Call statements that take both memory and register operands. */ struct GTY(()) gimple_statement_call { /* [ WORD 1-9 ] */ struct gimple_statement_with_memory_ops_base membase; /* [ WORD 10-13 ] */ struct pt_solution call_used; struct pt_solution call_clobbered; /* [ WORD 14 ] */ union GTY ((desc ("%1.membase.opbase.gsbase.subcode & GF_CALL_INTERNAL"))) { tree GTY ((tag ("0"))) fntype; enum internal_fn GTY ((tag ("GF_CALL_INTERNAL"))) internal_fn; } u; /* [ WORD 15 ] Operand vector. NOTE! This must always be the last field of this structure. In particular, this means that this structure cannot be embedded inside another one. */ tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1]; }; /* OpenMP statements (#pragma omp). */ struct GTY(()) gimple_statement_omp { /* [ WORD 1-6 ] */ struct gimple_statement_base gsbase; /* [ WORD 7 ] */ gimple_seq body; }; /* GIMPLE_BIND */ struct GTY(()) gimple_statement_bind { /* [ WORD 1-6 ] */ struct gimple_statement_base gsbase; /* [ WORD 7 ] Variables declared in this scope. */ tree vars; /* [ WORD 8 ] This is different than the BLOCK field in gimple_statement_base, which is analogous to TREE_BLOCK (i.e., the lexical block holding this statement). This field is the equivalent of BIND_EXPR_BLOCK in tree land (i.e., the lexical scope defined by this bind). See gimple-low.c. */ tree block; /* [ WORD 9 ] */ gimple_seq body; }; /* GIMPLE_CATCH */ struct GTY(()) gimple_statement_catch { /* [ WORD 1-6 ] */ struct gimple_statement_base gsbase; /* [ WORD 7 ] */ tree types; /* [ WORD 8 ] */ gimple_seq handler; }; /* GIMPLE_EH_FILTER */ struct GTY(()) gimple_statement_eh_filter { /* [ WORD 1-6 ] */ struct gimple_statement_base gsbase; /* [ WORD 7 ] Filter types. */ tree types; /* [ WORD 8 ] Failure actions. */ gimple_seq failure; }; /* GIMPLE_EH_ELSE */ struct GTY(()) gimple_statement_eh_else { /* [ WORD 1-6 ] */ struct gimple_statement_base gsbase; /* [ WORD 7,8 ] */ gimple_seq n_body, e_body; }; /* GIMPLE_EH_MUST_NOT_THROW */ struct GTY(()) gimple_statement_eh_mnt { /* [ WORD 1-6 ] */ struct gimple_statement_base gsbase; /* [ WORD 7 ] Abort function decl. 
*/ tree fndecl; }; /* GIMPLE_PHI */ struct GTY(()) gimple_statement_phi { /* [ WORD 1-6 ] */ struct gimple_statement_base gsbase; /* [ WORD 7 ] */ unsigned capacity; unsigned nargs; /* [ WORD 8 ] */ tree result; /* [ WORD 9 ] */ struct phi_arg_d GTY ((length ("%h.nargs"))) args[1]; }; /* GIMPLE_RESX, GIMPLE_EH_DISPATCH */ struct GTY(()) gimple_statement_eh_ctrl { /* [ WORD 1-6 ] */ struct gimple_statement_base gsbase; /* [ WORD 7 ] Exception region number. */ int region; }; /* GIMPLE_TRY */ struct GTY(()) gimple_statement_try { /* [ WORD 1-6 ] */ struct gimple_statement_base gsbase; /* [ WORD 7 ] Expression to evaluate. */ gimple_seq eval; /* [ WORD 8 ] Cleanup expression. */ gimple_seq cleanup; }; /* Kind of GIMPLE_TRY statements. */ enum gimple_try_flags { /* A try/catch. */ GIMPLE_TRY_CATCH = 1 << 0, /* A try/finally. */ GIMPLE_TRY_FINALLY = 1 << 1, GIMPLE_TRY_KIND = GIMPLE_TRY_CATCH | GIMPLE_TRY_FINALLY, /* Analogous to TRY_CATCH_IS_CLEANUP. */ GIMPLE_TRY_CATCH_IS_CLEANUP = 1 << 2 }; /* GIMPLE_WITH_CLEANUP_EXPR */ struct GTY(()) gimple_statement_wce { /* [ WORD 1-6 ] */ struct gimple_statement_base gsbase; /* Subcode: CLEANUP_EH_ONLY. True if the cleanup should only be executed if an exception is thrown, not on normal exit of its scope. This flag is analogous to the CLEANUP_EH_ONLY flag in TARGET_EXPRs. */ /* [ WORD 7 ] Cleanup expression. */ gimple_seq cleanup; }; /* GIMPLE_ASM */ struct GTY(()) gimple_statement_asm { /* [ WORD 1-9 ] */ struct gimple_statement_with_memory_ops_base membase; /* [ WORD 10 ] __asm__ statement. */ const char *string; /* [ WORD 11 ] Number of inputs, outputs, clobbers, labels. */ unsigned char ni; unsigned char no; unsigned char nc; unsigned char nl; /* [ WORD 12 ] Operand vector. NOTE! This must always be the last field of this structure. In particular, this means that this structure cannot be embedded inside another one. */ tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1]; }; /* GIMPLE_OMP_CRITICAL */ struct GTY(()) gimple_statement_omp_critical { /* [ WORD 1-7 ] */ struct gimple_statement_omp omp; /* [ WORD 8 ] Critical section name. */ tree name; }; struct GTY(()) gimple_omp_for_iter { /* Condition code. */ enum tree_code cond; /* Index variable. */ tree index; /* Initial value. */ tree initial; /* Final value. */ tree final; /* Increment. */ tree incr; }; /* GIMPLE_OMP_FOR */ struct GTY(()) gimple_statement_omp_for { /* [ WORD 1-7 ] */ struct gimple_statement_omp omp; /* [ WORD 8 ] */ tree clauses; /* [ WORD 9 ] Number of elements in iter array. */ size_t collapse; /* [ WORD 10 ] */ struct gimple_omp_for_iter * GTY((length ("%h.collapse"))) iter; /* [ WORD 11 ] Pre-body evaluated before the loop body begins. */ gimple_seq pre_body; }; /* GIMPLE_OMP_PARALLEL */ struct GTY(()) gimple_statement_omp_parallel { /* [ WORD 1-7 ] */ struct gimple_statement_omp omp; /* [ WORD 8 ] Clauses. */ tree clauses; /* [ WORD 9 ] Child function holding the body of the parallel region. */ tree child_fn; /* [ WORD 10 ] Shared data argument. */ tree data_arg; }; /* GIMPLE_OMP_TASK */ struct GTY(()) gimple_statement_omp_task { /* [ WORD 1-10 ] */ struct gimple_statement_omp_parallel par; /* [ WORD 11 ] Child function holding firstprivate initialization if needed. */ tree copy_fn; /* [ WORD 12-13 ] Size and alignment in bytes of the argument data block. */ tree arg_size; tree arg_align; }; /* GIMPLE_OMP_SECTION */ /* Uses struct gimple_statement_omp. 
*/ /* GIMPLE_OMP_SECTIONS */ struct GTY(()) gimple_statement_omp_sections { /* [ WORD 1-7 ] */ struct gimple_statement_omp omp; /* [ WORD 8 ] */ tree clauses; /* [ WORD 9 ] The control variable used for deciding which of the sections to execute. */ tree control; }; /* GIMPLE_OMP_CONTINUE. Note: This does not inherit from gimple_statement_omp, because we do not need the body field. */ struct GTY(()) gimple_statement_omp_continue { /* [ WORD 1-6 ] */ struct gimple_statement_base gsbase; /* [ WORD 7 ] */ tree control_def; /* [ WORD 8 ] */ tree control_use; }; /* GIMPLE_OMP_SINGLE */ struct GTY(()) gimple_statement_omp_single { /* [ WORD 1-7 ] */ struct gimple_statement_omp omp; /* [ WORD 7 ] */ tree clauses; }; /* GIMPLE_OMP_ATOMIC_LOAD. Note: This is based on gimple_statement_base, not g_s_omp, because g_s_omp contains a sequence, which we don't need here. */ struct GTY(()) gimple_statement_omp_atomic_load { /* [ WORD 1-6 ] */ struct gimple_statement_base gsbase; /* [ WORD 7-8 ] */ tree rhs, lhs; }; /* GIMPLE_OMP_ATOMIC_STORE. See note on GIMPLE_OMP_ATOMIC_LOAD. */ struct GTY(()) gimple_statement_omp_atomic_store { /* [ WORD 1-6 ] */ struct gimple_statement_base gsbase; /* [ WORD 7 ] */ tree val; }; /* GIMPLE_TRANSACTION. */ /* Bits to be stored in the GIMPLE_TRANSACTION subcode. */ /* The __transaction_atomic was declared [[outer]] or it is __transaction_relaxed. */ #define GTMA_IS_OUTER (1u << 0) #define GTMA_IS_RELAXED (1u << 1) #define GTMA_DECLARATION_MASK (GTMA_IS_OUTER | GTMA_IS_RELAXED) /* The transaction is seen to not have an abort. */ #define GTMA_HAVE_ABORT (1u << 2) /* The transaction is seen to have loads or stores. */ #define GTMA_HAVE_LOAD (1u << 3) #define GTMA_HAVE_STORE (1u << 4) /* The transaction MAY enter serial irrevocable mode in its dynamic scope. */ #define GTMA_MAY_ENTER_IRREVOCABLE (1u << 5) /* The transaction WILL enter serial irrevocable mode. An irrevocable block post-dominates the entire transaction, such that all invocations of the transaction will go serial-irrevocable. In such case, we don't bother instrumenting the transaction, and tell the runtime that it should begin the transaction in serial-irrevocable mode. */ #define GTMA_DOES_GO_IRREVOCABLE (1u << 6) /* The transaction contains no instrumentation code whatsover, most likely because it is guaranteed to go irrevocable upon entry. */ #define GTMA_HAS_NO_INSTRUMENTATION (1u << 7) struct GTY(()) gimple_statement_transaction { /* [ WORD 1-9 ] */ struct gimple_statement_with_memory_ops_base gsbase; /* [ WORD 10 ] */ gimple_seq body; /* [ WORD 11 ] */ tree label; }; #define DEFGSSTRUCT(SYM, STRUCT, HAS_TREE_OP) SYM, enum gimple_statement_structure_enum { #include "gsstruct.def" LAST_GSS_ENUM }; #undef DEFGSSTRUCT /* Define the overall contents of a gimple tuple. It may be any of the structures declared above for various types of tuples. 
*/ union GTY ((desc ("gimple_statement_structure (&%h)"), chain_next ("%h.gsbase.next"), variable_size)) gimple_statement_d { struct gimple_statement_base GTY ((tag ("GSS_BASE"))) gsbase; struct gimple_statement_with_ops GTY ((tag ("GSS_WITH_OPS"))) gsops; struct gimple_statement_with_memory_ops_base GTY ((tag ("GSS_WITH_MEM_OPS_BASE"))) gsmembase; struct gimple_statement_with_memory_ops GTY ((tag ("GSS_WITH_MEM_OPS"))) gsmem; struct gimple_statement_call GTY ((tag ("GSS_CALL"))) gimple_call; struct gimple_statement_omp GTY ((tag ("GSS_OMP"))) omp; struct gimple_statement_bind GTY ((tag ("GSS_BIND"))) gimple_bind; struct gimple_statement_catch GTY ((tag ("GSS_CATCH"))) gimple_catch; struct gimple_statement_eh_filter GTY ((tag ("GSS_EH_FILTER"))) gimple_eh_filter; struct gimple_statement_eh_mnt GTY ((tag ("GSS_EH_MNT"))) gimple_eh_mnt; struct gimple_statement_eh_else GTY ((tag ("GSS_EH_ELSE"))) gimple_eh_else; struct gimple_statement_phi GTY ((tag ("GSS_PHI"))) gimple_phi; struct gimple_statement_eh_ctrl GTY ((tag ("GSS_EH_CTRL"))) gimple_eh_ctrl; struct gimple_statement_try GTY ((tag ("GSS_TRY"))) gimple_try; struct gimple_statement_wce GTY ((tag ("GSS_WCE"))) gimple_wce; struct gimple_statement_asm GTY ((tag ("GSS_ASM"))) gimple_asm; struct gimple_statement_omp_critical GTY ((tag ("GSS_OMP_CRITICAL"))) gimple_omp_critical; struct gimple_statement_omp_for GTY ((tag ("GSS_OMP_FOR"))) gimple_omp_for; struct gimple_statement_omp_parallel GTY ((tag ("GSS_OMP_PARALLEL"))) gimple_omp_parallel; struct gimple_statement_omp_task GTY ((tag ("GSS_OMP_TASK"))) gimple_omp_task; struct gimple_statement_omp_sections GTY ((tag ("GSS_OMP_SECTIONS"))) gimple_omp_sections; struct gimple_statement_omp_single GTY ((tag ("GSS_OMP_SINGLE"))) gimple_omp_single; struct gimple_statement_omp_continue GTY ((tag ("GSS_OMP_CONTINUE"))) gimple_omp_continue; struct gimple_statement_omp_atomic_load GTY ((tag ("GSS_OMP_ATOMIC_LOAD"))) gimple_omp_atomic_load; struct gimple_statement_omp_atomic_store GTY ((tag ("GSS_OMP_ATOMIC_STORE"))) gimple_omp_atomic_store; struct gimple_statement_transaction GTY((tag ("GSS_TRANSACTION"))) gimple_transaction; }; /* In gimple.c. */ /* Offset in bytes to the location of the operand vector. Zero if there is no operand vector for this tuple structure. */ extern size_t const gimple_ops_offset_[]; /* Map GIMPLE codes to GSS codes. */ extern enum gimple_statement_structure_enum const gss_for_code_[]; /* This variable holds the currently expanded gimple statement for purposes of comminucating the profile info to the builtin expanders. 
*/ extern gimple currently_expanding_gimple_stmt; gimple gimple_build_return (tree); gimple gimple_build_assign_stat (tree, tree MEM_STAT_DECL); #define gimple_build_assign(l,r) gimple_build_assign_stat (l, r MEM_STAT_INFO) void extract_ops_from_tree_1 (tree, enum tree_code *, tree *, tree *, tree *); gimple gimple_build_assign_with_ops (enum tree_code, tree, tree, tree CXX_MEM_STAT_INFO); gimple gimple_build_assign_with_ops (enum tree_code, tree, tree, tree, tree CXX_MEM_STAT_INFO); gimple gimple_build_debug_bind_stat (tree, tree, gimple MEM_STAT_DECL); #define gimple_build_debug_bind(var,val,stmt) \ gimple_build_debug_bind_stat ((var), (val), (stmt) MEM_STAT_INFO) gimple gimple_build_debug_source_bind_stat (tree, tree, gimple MEM_STAT_DECL); #define gimple_build_debug_source_bind(var,val,stmt) \ gimple_build_debug_source_bind_stat ((var), (val), (stmt) MEM_STAT_INFO) gimple gimple_build_call_vec (tree, vec<tree> ); gimple gimple_build_call (tree, unsigned, ...); gimple gimple_build_call_valist (tree, unsigned, va_list); gimple gimple_build_call_internal (enum internal_fn, unsigned, ...); gimple gimple_build_call_internal_vec (enum internal_fn, vec<tree> ); gimple gimple_build_call_from_tree (tree); gimple gimplify_assign (tree, tree, gimple_seq *); gimple gimple_build_cond (enum tree_code, tree, tree, tree, tree); gimple gimple_build_label (tree label); gimple gimple_build_goto (tree dest); gimple gimple_build_nop (void); gimple gimple_build_bind (tree, gimple_seq, tree); gimple gimple_build_asm_vec (const char *, vec<tree, va_gc> *, vec<tree, va_gc> *, vec<tree, va_gc> *, vec<tree, va_gc> *); gimple gimple_build_catch (tree, gimple_seq); gimple gimple_build_eh_filter (tree, gimple_seq); gimple gimple_build_eh_must_not_throw (tree); gimple gimple_build_eh_else (gimple_seq, gimple_seq); gimple gimple_build_try (gimple_seq, gimple_seq, enum gimple_try_flags); gimple gimple_build_wce (gimple_seq); gimple gimple_build_resx (int); gimple gimple_build_eh_dispatch (int); gimple gimple_build_switch_nlabels (unsigned, tree, tree); gimple gimple_build_switch (tree, tree, vec<tree> ); gimple gimple_build_omp_parallel (gimple_seq, tree, tree, tree); gimple gimple_build_omp_task (gimple_seq, tree, tree, tree, tree, tree, tree); gimple gimple_build_omp_for (gimple_seq, tree, size_t, gimple_seq); gimple gimple_build_omp_critical (gimple_seq, tree); gimple gimple_build_omp_section (gimple_seq); gimple gimple_build_omp_continue (tree, tree); gimple gimple_build_omp_master (gimple_seq); gimple gimple_build_omp_return (bool); gimple gimple_build_omp_ordered (gimple_seq); gimple gimple_build_omp_sections (gimple_seq, tree); gimple gimple_build_omp_sections_switch (void); gimple gimple_build_omp_single (gimple_seq, tree); gimple gimple_build_cdt (tree, tree); gimple gimple_build_omp_atomic_load (tree, tree); gimple gimple_build_omp_atomic_store (tree); gimple gimple_build_transaction (gimple_seq, tree); gimple gimple_build_predict (enum br_predictor, enum prediction); enum gimple_statement_structure_enum gss_for_assign (enum tree_code); void sort_case_labels (vec<tree> ); void preprocess_case_label_vec_for_gimple (vec<tree> , tree, tree *); void gimple_set_body (tree, gimple_seq); gimple_seq gimple_body (tree); bool gimple_has_body_p (tree); gimple_seq gimple_seq_alloc (void); void gimple_seq_free (gimple_seq); void gimple_seq_add_seq (gimple_seq *, gimple_seq); gimple_seq gimple_seq_copy (gimple_seq); bool gimple_call_same_target_p (const_gimple, const_gimple); int gimple_call_flags (const_gimple); int 
gimple_call_return_flags (const_gimple); int gimple_call_arg_flags (const_gimple, unsigned); void gimple_call_reset_alias_info (gimple); bool gimple_assign_copy_p (gimple); bool gimple_assign_ssa_name_copy_p (gimple); bool gimple_assign_unary_nop_p (gimple); void gimple_set_bb (gimple, basic_block); void gimple_assign_set_rhs_from_tree (gimple_stmt_iterator *, tree); void gimple_assign_set_rhs_with_ops_1 (gimple_stmt_iterator *, enum tree_code, tree, tree, tree); tree gimple_get_lhs (const_gimple); void gimple_set_lhs (gimple, tree); void gimple_replace_lhs (gimple, tree); gimple gimple_copy (gimple); void gimple_cond_get_ops_from_tree (tree, enum tree_code *, tree *, tree *); gimple gimple_build_cond_from_tree (tree, tree, tree); void gimple_cond_set_condition_from_tree (gimple, tree); bool gimple_has_side_effects (const_gimple); bool gimple_could_trap_p (gimple); bool gimple_could_trap_p_1 (gimple, bool, bool); bool gimple_assign_rhs_could_trap_p (gimple); void gimple_regimplify_operands (gimple, gimple_stmt_iterator *); bool empty_body_p (gimple_seq); unsigned get_gimple_rhs_num_ops (enum tree_code); #define gimple_alloc(c, n) gimple_alloc_stat (c, n MEM_STAT_INFO) gimple gimple_alloc_stat (enum gimple_code, unsigned MEM_STAT_DECL); const char *gimple_decl_printable_name (tree, int); tree gimple_get_virt_method_for_binfo (HOST_WIDE_INT, tree); tree gimple_extract_devirt_binfo_from_cst (tree); /* Returns true iff T is a scalar register variable. */ extern bool is_gimple_reg (tree); /* Returns true iff T is any sort of variable. */ extern bool is_gimple_variable (tree); /* Returns true iff T is any sort of symbol. */ extern bool is_gimple_id (tree); /* Returns true iff T is a variable or an INDIRECT_REF (of a variable). */ extern bool is_gimple_min_lval (tree); /* Returns true iff T is something whose address can be taken. */ extern bool is_gimple_addressable (tree); /* Returns true iff T is any valid GIMPLE lvalue. */ extern bool is_gimple_lvalue (tree); /* Returns true iff T is a GIMPLE address. */ bool is_gimple_address (const_tree); /* Returns true iff T is a GIMPLE invariant address. */ bool is_gimple_invariant_address (const_tree); /* Returns true iff T is a GIMPLE invariant address at interprocedural level. */ bool is_gimple_ip_invariant_address (const_tree); /* Returns true iff T is a valid GIMPLE constant. */ bool is_gimple_constant (const_tree); /* Returns true iff T is a GIMPLE restricted function invariant. */ extern bool is_gimple_min_invariant (const_tree); /* Returns true iff T is a GIMPLE restricted interprecodural invariant. */ extern bool is_gimple_ip_invariant (const_tree); /* Returns true iff T is a GIMPLE rvalue. */ extern bool is_gimple_val (tree); /* Returns true iff T is a GIMPLE asm statement input. */ extern bool is_gimple_asm_val (tree); /* Returns true iff T is a valid address operand of a MEM_REF. */ bool is_gimple_mem_ref_addr (tree); /* Returns true iff T is a valid if-statement condition. */ extern bool is_gimple_condexpr (tree); /* Returns true iff T is a valid call address expression. */ extern bool is_gimple_call_addr (tree); /* Return TRUE iff stmt is a call to a built-in function. 
*/ extern bool is_gimple_builtin_call (gimple stmt); extern void recalculate_side_effects (tree); extern bool gimple_compare_field_offset (tree, tree); extern tree gimple_register_canonical_type (tree); extern void print_gimple_types_stats (const char *); extern void free_gimple_type_tables (void); extern tree gimple_unsigned_type (tree); extern tree gimple_signed_type (tree); extern alias_set_type gimple_get_alias_set (tree); extern void count_uses_and_derefs (tree, gimple, unsigned *, unsigned *, unsigned *); extern bool walk_stmt_load_store_addr_ops (gimple, void *, bool (*)(gimple, tree, void *), bool (*)(gimple, tree, void *), bool (*)(gimple, tree, void *)); extern bool walk_stmt_load_store_ops (gimple, void *, bool (*)(gimple, tree, void *), bool (*)(gimple, tree, void *)); extern bool gimple_ior_addresses_taken (bitmap, gimple); extern bool gimple_call_builtin_p (gimple, enum built_in_class); extern bool gimple_call_builtin_p (gimple, enum built_in_function); extern bool gimple_asm_clobbers_memory_p (const_gimple); /* In gimplify.c */ extern tree create_tmp_var_raw (tree, const char *); extern tree create_tmp_var_name (const char *); extern tree create_tmp_var (tree, const char *); extern tree create_tmp_reg (tree, const char *); extern tree get_initialized_tmp_var (tree, gimple_seq *, gimple_seq *); extern tree get_formal_tmp_var (tree, gimple_seq *); extern void declare_vars (tree, gimple, bool); extern void annotate_all_with_location (gimple_seq, location_t); /* Validation of GIMPLE expressions. Note that these predicates only check the basic form of the expression, they don't recurse to make sure that underlying nodes are also of the right form. */ typedef bool (*gimple_predicate)(tree); /* FIXME we should deduce this from the predicate. */ enum fallback { fb_none = 0, /* Do not generate a temporary. */ fb_rvalue = 1, /* Generate an rvalue to hold the result of a gimplified expression. */ fb_lvalue = 2, /* Generate an lvalue to hold the result of a gimplified expression. */ fb_mayfail = 4, /* Gimplification may fail. Error issued afterwards. */ fb_either= fb_rvalue | fb_lvalue }; typedef int fallback_t; enum gimplify_status { GS_ERROR = -2, /* Something Bad Seen. */ GS_UNHANDLED = -1, /* A langhook result for "I dunno". */ GS_OK = 0, /* We did something, maybe more to do. */ GS_ALL_DONE = 1 /* The expression is fully gimplified. */ }; struct gimplify_ctx { struct gimplify_ctx *prev_context; vec<gimple> bind_expr_stack; tree temps; gimple_seq conditional_cleanups; tree exit_label; tree return_temp; vec<tree> case_labels; /* The formal temporary table. Should this be persistent? */ htab_t temp_htab; int conditions; bool save_stack; bool into_ssa; bool allow_rhs_cond_expr; bool in_cleanup_point_expr; }; /* Return true if gimplify_one_sizepos doesn't need to gimplify expr (when in TYPE_SIZE{,_UNIT} and similar type/decl size/bitsize fields). */ static inline bool is_gimple_sizepos (tree expr) { /* gimplify_one_sizepos doesn't need to do anything if the value isn't there, is constant, or contains A PLACEHOLDER_EXPR. We also don't want to do anything if it's already a VAR_DECL. If it's a VAR_DECL from another function, the gimplifier will want to replace it with a new variable, but that will cause problems if this type is from outside the function. It's OK to have that here. 
*/ return (expr == NULL_TREE || TREE_CONSTANT (expr) || TREE_CODE (expr) == VAR_DECL || CONTAINS_PLACEHOLDER_P (expr)); } extern enum gimplify_status gimplify_expr (tree *, gimple_seq *, gimple_seq *, bool (*) (tree), fallback_t); extern void gimplify_type_sizes (tree, gimple_seq *); extern void gimplify_one_sizepos (tree *, gimple_seq *); enum gimplify_status gimplify_self_mod_expr (tree *, gimple_seq *, gimple_seq *, bool, tree); extern bool gimplify_stmt (tree *, gimple_seq *); extern gimple gimplify_body (tree, bool); extern void push_gimplify_context (struct gimplify_ctx *); extern void pop_gimplify_context (gimple); extern void gimplify_and_add (tree, gimple_seq *); /* Miscellaneous helpers. */ extern void gimple_add_tmp_var (tree); extern gimple gimple_current_bind_expr (void); extern vec<gimple> gimple_bind_expr_stack (void); extern tree voidify_wrapper_expr (tree, tree); extern tree build_and_jump (tree *); extern tree force_labels_r (tree *, int *, void *); extern enum gimplify_status gimplify_va_arg_expr (tree *, gimple_seq *, gimple_seq *); struct gimplify_omp_ctx; extern void omp_firstprivatize_variable (struct gimplify_omp_ctx *, tree); extern tree gimple_boolify (tree); extern gimple_predicate rhs_predicate_for (tree); extern tree canonicalize_cond_expr_cond (tree); /* In omp-low.c. */ extern tree omp_reduction_init (tree, tree); /* In trans-mem.c. */ extern void diagnose_tm_safe_errors (tree); extern void compute_transaction_bits (void); /* In tree-nested.c. */ extern void lower_nested_functions (tree); extern void insert_field_into_struct (tree, tree); /* In gimplify.c. */ extern void gimplify_function_tree (tree); /* In cfgexpand.c. */ extern tree gimple_assign_rhs_to_tree (gimple); /* In builtins.c */ extern bool validate_gimple_arglist (const_gimple, ...); /* In tree-ssa.c */ extern bool tree_ssa_useless_type_conversion (tree); extern tree tree_ssa_strip_useless_type_conversions (tree); extern bool useless_type_conversion_p (tree, tree); extern bool types_compatible_p (tree, tree); /* Return the first node in GIMPLE sequence S. */ static inline gimple_seq_node gimple_seq_first (gimple_seq s) { return s; } /* Return the first statement in GIMPLE sequence S. */ static inline gimple gimple_seq_first_stmt (gimple_seq s) { gimple_seq_node n = gimple_seq_first (s); return n; } /* Return the last node in GIMPLE sequence S. */ static inline gimple_seq_node gimple_seq_last (gimple_seq s) { return s ? s->gsbase.prev : NULL; } /* Return the last statement in GIMPLE sequence S. */ static inline gimple gimple_seq_last_stmt (gimple_seq s) { gimple_seq_node n = gimple_seq_last (s); return n; } /* Set the last node in GIMPLE sequence *PS to LAST. */ static inline void gimple_seq_set_last (gimple_seq *ps, gimple_seq_node last) { (*ps)->gsbase.prev = last; } /* Set the first node in GIMPLE sequence *PS to FIRST. */ static inline void gimple_seq_set_first (gimple_seq *ps, gimple_seq_node first) { *ps = first; } /* Return true if GIMPLE sequence S is empty. */ static inline bool gimple_seq_empty_p (gimple_seq s) { return s == NULL; } void gimple_seq_add_stmt (gimple_seq *, gimple); /* Link gimple statement GS to the end of the sequence *SEQ_P. If *SEQ_P is NULL, a new sequence is allocated. This function is similar to gimple_seq_add_stmt, but does not scan the operands. During gimplification, we need to manipulate statement sequences before the def/use vectors have been constructed. 
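   A minimal sketch of the intended use during gimplification (STMT1 and
   STMT2 stand for statements that were built elsewhere):

     gimple_seq seq = NULL;
     gimple_seq_add_stmt_without_update (&seq, stmt1);
     gimple_seq_add_stmt_without_update (&seq, stmt2);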
*/ void gimple_seq_add_stmt_without_update (gimple_seq *, gimple); /* Allocate a new sequence and initialize its first element with STMT. */ static inline gimple_seq gimple_seq_alloc_with_stmt (gimple stmt) { gimple_seq seq = NULL; gimple_seq_add_stmt (&seq, stmt); return seq; } /* Returns the sequence of statements in BB. */ static inline gimple_seq bb_seq (const_basic_block bb) { return (!(bb->flags & BB_RTL)) ? bb->il.gimple.seq : NULL; } static inline gimple_seq * bb_seq_addr (basic_block bb) { return (!(bb->flags & BB_RTL)) ? &bb->il.gimple.seq : NULL; } /* Sets the sequence of statements in BB to SEQ. */ static inline void set_bb_seq (basic_block bb, gimple_seq seq) { gcc_checking_assert (!(bb->flags & BB_RTL)); bb->il.gimple.seq = seq; } /* Return the code for GIMPLE statement G. */ static inline enum gimple_code gimple_code (const_gimple g) { return g->gsbase.code; } /* Return the GSS code used by a GIMPLE code. */ static inline enum gimple_statement_structure_enum gss_for_code (enum gimple_code code) { gcc_gimple_checking_assert ((unsigned int)code < LAST_AND_UNUSED_GIMPLE_CODE); return gss_for_code_[code]; } /* Return which GSS code is used by GS. */ static inline enum gimple_statement_structure_enum gimple_statement_structure (gimple gs) { return gss_for_code (gimple_code (gs)); } /* Return true if statement G has sub-statements. This is only true for High GIMPLE statements. */ static inline bool gimple_has_substatements (gimple g) { switch (gimple_code (g)) { case GIMPLE_BIND: case GIMPLE_CATCH: case GIMPLE_EH_FILTER: case GIMPLE_EH_ELSE: case GIMPLE_TRY: case GIMPLE_OMP_FOR: case GIMPLE_OMP_MASTER: case GIMPLE_OMP_ORDERED: case GIMPLE_OMP_SECTION: case GIMPLE_OMP_PARALLEL: case GIMPLE_OMP_TASK: case GIMPLE_OMP_SECTIONS: case GIMPLE_OMP_SINGLE: case GIMPLE_OMP_CRITICAL: case GIMPLE_WITH_CLEANUP_EXPR: case GIMPLE_TRANSACTION: return true; default: return false; } } /* Return the basic block holding statement G. */ static inline basic_block gimple_bb (const_gimple g) { return g->gsbase.bb; } /* Return the lexical scope block holding statement G. */ static inline tree gimple_block (const_gimple g) { return LOCATION_BLOCK (g->gsbase.location); } /* Set BLOCK to be the lexical scope block holding statement G. */ static inline void gimple_set_block (gimple g, tree block) { if (block) g->gsbase.location = COMBINE_LOCATION_DATA (line_table, g->gsbase.location, block); else g->gsbase.location = LOCATION_LOCUS (g->gsbase.location); } /* Return location information for statement G. */ static inline location_t gimple_location (const_gimple g) { return g->gsbase.location; } /* Return pointer to location information for statement G. */ static inline const location_t * gimple_location_ptr (const_gimple g) { return &g->gsbase.location; } /* Set location information for statement G. */ static inline void gimple_set_location (gimple g, location_t location) { g->gsbase.location = location; } /* Return true if G contains location information. */ static inline bool gimple_has_location (const_gimple g) { return LOCATION_LOCUS (gimple_location (g)) != UNKNOWN_LOCATION; } /* Return the file name of the location of STMT. */ static inline const char * gimple_filename (const_gimple stmt) { return LOCATION_FILE (gimple_location (stmt)); } /* Return the line number of the location of STMT. */ static inline int gimple_lineno (const_gimple stmt) { return LOCATION_LINE (gimple_location (stmt)); } /* Determine whether SEQ is a singleton. 
*/ static inline bool gimple_seq_singleton_p (gimple_seq seq) { return ((gimple_seq_first (seq) != NULL) && (gimple_seq_first (seq) == gimple_seq_last (seq))); } /* Return true if no warnings should be emitted for statement STMT. */ static inline bool gimple_no_warning_p (const_gimple stmt) { return stmt->gsbase.no_warning; } /* Set the no_warning flag of STMT to NO_WARNING. */ static inline void gimple_set_no_warning (gimple stmt, bool no_warning) { stmt->gsbase.no_warning = (unsigned) no_warning; } /* Set the visited status on statement STMT to VISITED_P. */ static inline void gimple_set_visited (gimple stmt, bool visited_p) { stmt->gsbase.visited = (unsigned) visited_p; } /* Return the visited status for statement STMT. */ static inline bool gimple_visited_p (gimple stmt) { return stmt->gsbase.visited; } /* Set pass local flag PLF on statement STMT to VAL_P. */ static inline void gimple_set_plf (gimple stmt, enum plf_mask plf, bool val_p) { if (val_p) stmt->gsbase.plf |= (unsigned int) plf; else stmt->gsbase.plf &= ~((unsigned int) plf); } /* Return the value of pass local flag PLF on statement STMT. */ static inline unsigned int gimple_plf (gimple stmt, enum plf_mask plf) { return stmt->gsbase.plf & ((unsigned int) plf); } /* Set the UID of statement. */ static inline void gimple_set_uid (gimple g, unsigned uid) { g->gsbase.uid = uid; } /* Return the UID of statement. */ static inline unsigned gimple_uid (const_gimple g) { return g->gsbase.uid; } /* Make statement G a singleton sequence. */ static inline void gimple_init_singleton (gimple g) { g->gsbase.next = NULL; g->gsbase.prev = g; } /* Return true if GIMPLE statement G has register or memory operands. */ static inline bool gimple_has_ops (const_gimple g) { return gimple_code (g) >= GIMPLE_COND && gimple_code (g) <= GIMPLE_RETURN; } /* Return true if GIMPLE statement G has memory operands. */ static inline bool gimple_has_mem_ops (const_gimple g) { return gimple_code (g) >= GIMPLE_ASSIGN && gimple_code (g) <= GIMPLE_RETURN; } /* Return the set of USE operands for statement G. */ static inline struct use_optype_d * gimple_use_ops (const_gimple g) { if (!gimple_has_ops (g)) return NULL; return g->gsops.opbase.use_ops; } /* Set USE to be the set of USE operands for statement G. */ static inline void gimple_set_use_ops (gimple g, struct use_optype_d *use) { gcc_gimple_checking_assert (gimple_has_ops (g)); g->gsops.opbase.use_ops = use; } /* Return the set of VUSE operand for statement G. */ static inline use_operand_p gimple_vuse_op (const_gimple g) { struct use_optype_d *ops; if (!gimple_has_mem_ops (g)) return NULL_USE_OPERAND_P; ops = g->gsops.opbase.use_ops; if (ops && USE_OP_PTR (ops)->use == &g->gsmembase.vuse) return USE_OP_PTR (ops); return NULL_USE_OPERAND_P; } /* Return the set of VDEF operand for statement G. */ static inline def_operand_p gimple_vdef_op (gimple g) { if (!gimple_has_mem_ops (g)) return NULL_DEF_OPERAND_P; if (g->gsmembase.vdef) return &g->gsmembase.vdef; return NULL_DEF_OPERAND_P; } /* Return the single VUSE operand of the statement G. */ static inline tree gimple_vuse (const_gimple g) { if (!gimple_has_mem_ops (g)) return NULL_TREE; return g->gsmembase.vuse; } /* Return the single VDEF operand of the statement G. */ static inline tree gimple_vdef (const_gimple g) { if (!gimple_has_mem_ops (g)) return NULL_TREE; return g->gsmembase.vdef; } /* Return the single VUSE operand of the statement G. 
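   A hedged sketch of how the virtual-operand getters above are typically
   combined (the helper name is purely illustrative and not part of this
   header): a pass that only wants statements which read memory but never
   write it can test the two operands directly:

     static bool
     reads_but_does_not_write_memory_p (gimple stmt)
     {
       return gimple_vuse (stmt) != NULL_TREE
              && gimple_vdef (stmt) == NULL_TREE;
     }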
*/ static inline tree * gimple_vuse_ptr (gimple g) { if (!gimple_has_mem_ops (g)) return NULL; return &g->gsmembase.vuse; } /* Return the single VDEF operand of the statement G. */ static inline tree * gimple_vdef_ptr (gimple g) { if (!gimple_has_mem_ops (g)) return NULL; return &g->gsmembase.vdef; } /* Set the single VUSE operand of the statement G. */ static inline void gimple_set_vuse (gimple g, tree vuse) { gcc_gimple_checking_assert (gimple_has_mem_ops (g)); g->gsmembase.vuse = vuse; } /* Set the single VDEF operand of the statement G. */ static inline void gimple_set_vdef (gimple g, tree vdef) { gcc_gimple_checking_assert (gimple_has_mem_ops (g)); g->gsmembase.vdef = vdef; } /* Return true if statement G has operands and the modified field has been set. */ static inline bool gimple_modified_p (const_gimple g) { return (gimple_has_ops (g)) ? (bool) g->gsbase.modified : false; } /* Set the MODIFIED flag to MODIFIEDP, iff the gimple statement G has a MODIFIED field. */ static inline void gimple_set_modified (gimple s, bool modifiedp) { if (gimple_has_ops (s)) s->gsbase.modified = (unsigned) modifiedp; } /* Return the tree code for the expression computed by STMT. This is only valid for GIMPLE_COND, GIMPLE_CALL and GIMPLE_ASSIGN. For GIMPLE_CALL, return CALL_EXPR as the expression code for consistency. This is useful when the caller needs to deal with the three kinds of computation that GIMPLE supports. */ static inline enum tree_code gimple_expr_code (const_gimple stmt) { enum gimple_code code = gimple_code (stmt); if (code == GIMPLE_ASSIGN || code == GIMPLE_COND) return (enum tree_code) stmt->gsbase.subcode; else { gcc_gimple_checking_assert (code == GIMPLE_CALL); return CALL_EXPR; } } /* Mark statement S as modified, and update it. */ static inline void update_stmt (gimple s) { if (gimple_has_ops (s)) { gimple_set_modified (s, true); update_stmt_operands (s); } } /* Update statement S if it has been optimized. */ static inline void update_stmt_if_modified (gimple s) { if (gimple_modified_p (s)) update_stmt_operands (s); } /* Return true if statement STMT contains volatile operands. */ static inline bool gimple_has_volatile_ops (const_gimple stmt) { if (gimple_has_mem_ops (stmt)) return stmt->gsbase.has_volatile_ops; else return false; } /* Set the HAS_VOLATILE_OPS flag to VOLATILEP. */ static inline void gimple_set_has_volatile_ops (gimple stmt, bool volatilep) { if (gimple_has_mem_ops (stmt)) stmt->gsbase.has_volatile_ops = (unsigned) volatilep; } /* Return true if BB is in a transaction. */ static inline bool block_in_transaction (basic_block bb) { return flag_tm && bb->flags & BB_IN_TRANSACTION; } /* Return true if STMT is in a transaction. */ static inline bool gimple_in_transaction (gimple stmt) { return block_in_transaction (gimple_bb (stmt)); } /* Return true if statement STMT may access memory. */ static inline bool gimple_references_memory_p (gimple stmt) { return gimple_has_mem_ops (stmt) && gimple_vuse (stmt); } /* Return the subcode for OMP statement S. */ static inline unsigned gimple_omp_subcode (const_gimple s) { gcc_gimple_checking_assert (gimple_code (s) >= GIMPLE_OMP_ATOMIC_LOAD && gimple_code (s) <= GIMPLE_OMP_SINGLE); return s->gsbase.subcode; } /* Set the subcode for OMP statement S to SUBCODE. */ static inline void gimple_omp_set_subcode (gimple s, unsigned int subcode) { /* We only have 16 bits for the subcode. Assert that we are not overflowing it. 
*/ gcc_gimple_checking_assert (subcode < (1 << 16)); s->gsbase.subcode = subcode; } /* Set the nowait flag on OMP_RETURN statement S. */ static inline void gimple_omp_return_set_nowait (gimple s) { GIMPLE_CHECK (s, GIMPLE_OMP_RETURN); s->gsbase.subcode |= GF_OMP_RETURN_NOWAIT; } /* Return true if OMP return statement G has the GF_OMP_RETURN_NOWAIT flag set. */ static inline bool gimple_omp_return_nowait_p (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_RETURN); return (gimple_omp_subcode (g) & GF_OMP_RETURN_NOWAIT) != 0; } /* Return true if OMP section statement G has the GF_OMP_SECTION_LAST flag set. */ static inline bool gimple_omp_section_last_p (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_SECTION); return (gimple_omp_subcode (g) & GF_OMP_SECTION_LAST) != 0; } /* Set the GF_OMP_SECTION_LAST flag on G. */ static inline void gimple_omp_section_set_last (gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_SECTION); g->gsbase.subcode |= GF_OMP_SECTION_LAST; } /* Return true if OMP parallel statement G has the GF_OMP_PARALLEL_COMBINED flag set. */ static inline bool gimple_omp_parallel_combined_p (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL); return (gimple_omp_subcode (g) & GF_OMP_PARALLEL_COMBINED) != 0; } /* Set the GF_OMP_PARALLEL_COMBINED field in G depending on the boolean value of COMBINED_P. */ static inline void gimple_omp_parallel_set_combined_p (gimple g, bool combined_p) { GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL); if (combined_p) g->gsbase.subcode |= GF_OMP_PARALLEL_COMBINED; else g->gsbase.subcode &= ~GF_OMP_PARALLEL_COMBINED; } /* Return true if OMP atomic load/store statement G has the GF_OMP_ATOMIC_NEED_VALUE flag set. */ static inline bool gimple_omp_atomic_need_value_p (const_gimple g) { if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD) GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE); return (gimple_omp_subcode (g) & GF_OMP_ATOMIC_NEED_VALUE) != 0; } /* Set the GF_OMP_ATOMIC_NEED_VALUE flag on G. */ static inline void gimple_omp_atomic_set_need_value (gimple g) { if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD) GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE); g->gsbase.subcode |= GF_OMP_ATOMIC_NEED_VALUE; } /* Return the number of operands for statement GS. */ static inline unsigned gimple_num_ops (const_gimple gs) { return gs->gsbase.num_ops; } /* Set the number of operands for statement GS. */ static inline void gimple_set_num_ops (gimple gs, unsigned num_ops) { gs->gsbase.num_ops = num_ops; } /* Return the array of operands for statement GS. */ static inline tree * gimple_ops (gimple gs) { size_t off; /* All the tuples have their operand vector at the very bottom of the structure. Note that those structures that do not have an operand vector have a zero offset. */ off = gimple_ops_offset_[gimple_statement_structure (gs)]; gcc_gimple_checking_assert (off != 0); return (tree *) ((char *) gs + off); } /* Return operand I for statement GS. */ static inline tree gimple_op (const_gimple gs, unsigned i) { if (gimple_has_ops (gs)) { gcc_gimple_checking_assert (i < gimple_num_ops (gs)); return gimple_ops (CONST_CAST_GIMPLE (gs))[i]; } else return NULL_TREE; } /* Return a pointer to operand I for statement GS. */ static inline tree * gimple_op_ptr (const_gimple gs, unsigned i) { if (gimple_has_ops (gs)) { gcc_gimple_checking_assert (i < gimple_num_ops (gs)); return gimple_ops (CONST_CAST_GIMPLE (gs)) + i; } else return NULL; } /* Set operand I of statement GS to OP. 
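   The generic operand accessors in this block are normally used together;
   as an illustrative sketch (hypothetical; `stmt' is any statement and the
   loop body is up to the caller):

     unsigned i;
     for (i = 0; i < gimple_num_ops (stmt); i++)
       {
         tree op = gimple_op (stmt, i);
         if (op != NULL_TREE)
           ...
       }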
*/ static inline void gimple_set_op (gimple gs, unsigned i, tree op) { gcc_gimple_checking_assert (gimple_has_ops (gs) && i < gimple_num_ops (gs)); /* Note. It may be tempting to assert that OP matches is_gimple_operand, but that would be wrong. Different tuples accept slightly different sets of tree operands. Each caller should perform its own validation. */ gimple_ops (gs)[i] = op; } /* Return true if GS is a GIMPLE_ASSIGN. */ static inline bool is_gimple_assign (const_gimple gs) { return gimple_code (gs) == GIMPLE_ASSIGN; } /* Determine if expression CODE is one of the valid expressions that can be used on the RHS of GIMPLE assignments. */ static inline enum gimple_rhs_class get_gimple_rhs_class (enum tree_code code) { return (enum gimple_rhs_class) gimple_rhs_class_table[(int) code]; } /* Return the LHS of assignment statement GS. */ static inline tree gimple_assign_lhs (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gimple_op (gs, 0); } /* Return a pointer to the LHS of assignment statement GS. */ static inline tree * gimple_assign_lhs_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gimple_op_ptr (gs, 0); } /* Set LHS to be the LHS operand of assignment statement GS. */ static inline void gimple_assign_set_lhs (gimple gs, tree lhs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); gimple_set_op (gs, 0, lhs); if (lhs && TREE_CODE (lhs) == SSA_NAME) SSA_NAME_DEF_STMT (lhs) = gs; } /* Return the first operand on the RHS of assignment statement GS. */ static inline tree gimple_assign_rhs1 (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gimple_op (gs, 1); } /* Return a pointer to the first operand on the RHS of assignment statement GS. */ static inline tree * gimple_assign_rhs1_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gimple_op_ptr (gs, 1); } /* Set RHS to be the first operand on the RHS of assignment statement GS. */ static inline void gimple_assign_set_rhs1 (gimple gs, tree rhs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); gimple_set_op (gs, 1, rhs); } /* Return the second operand on the RHS of assignment statement GS. If GS does not have two operands, NULL is returned instead. */ static inline tree gimple_assign_rhs2 (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); if (gimple_num_ops (gs) >= 3) return gimple_op (gs, 2); else return NULL_TREE; } /* Return a pointer to the second operand on the RHS of assignment statement GS. */ static inline tree * gimple_assign_rhs2_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gimple_op_ptr (gs, 2); } /* Set RHS to be the second operand on the RHS of assignment statement GS. */ static inline void gimple_assign_set_rhs2 (gimple gs, tree rhs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); gimple_set_op (gs, 2, rhs); } /* Return the third operand on the RHS of assignment statement GS. If GS does not have two operands, NULL is returned instead. */ static inline tree gimple_assign_rhs3 (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); if (gimple_num_ops (gs) >= 4) return gimple_op (gs, 3); else return NULL_TREE; } /* Return a pointer to the third operand on the RHS of assignment statement GS. */ static inline tree * gimple_assign_rhs3_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gimple_op_ptr (gs, 3); } /* Set RHS to be the third operand on the RHS of assignment statement GS. 
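   As a sketch (hypothetical; `stmt' is already known to be a
   GIMPLE_ASSIGN), the three RHS slots are simply read side by side:

     tree a = gimple_assign_rhs1 (stmt);
     tree b = gimple_assign_rhs2 (stmt);
     tree c = gimple_assign_rhs3 (stmt);

   where `b' and `c' come back as NULL_TREE when the statement carries
   fewer operands, so callers can use them to distinguish unary, binary and
   ternary right-hand sides.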
*/ static inline void gimple_assign_set_rhs3 (gimple gs, tree rhs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); gimple_set_op (gs, 3, rhs); } /* A wrapper around gimple_assign_set_rhs_with_ops_1, for callers which expect to see only a maximum of two operands. */ static inline void gimple_assign_set_rhs_with_ops (gimple_stmt_iterator *gsi, enum tree_code code, tree op1, tree op2) { gimple_assign_set_rhs_with_ops_1 (gsi, code, op1, op2, NULL); } /* A wrapper around extract_ops_from_tree_1, for callers which expect to see only a maximum of two operands. */ static inline void extract_ops_from_tree (tree expr, enum tree_code *code, tree *op0, tree *op1) { tree op2; extract_ops_from_tree_1 (expr, code, op0, op1, &op2); gcc_assert (op2 == NULL_TREE); } /* Returns true if GS is a nontemporal move. */ static inline bool gimple_assign_nontemporal_move_p (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gs->gsbase.nontemporal_move; } /* Sets nontemporal move flag of GS to NONTEMPORAL. */ static inline void gimple_assign_set_nontemporal_move (gimple gs, bool nontemporal) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); gs->gsbase.nontemporal_move = nontemporal; } /* Return the code of the expression computed on the rhs of assignment statement GS. In case that the RHS is a single object, returns the tree code of the object. */ static inline enum tree_code gimple_assign_rhs_code (const_gimple gs) { enum tree_code code; GIMPLE_CHECK (gs, GIMPLE_ASSIGN); code = (enum tree_code) gs->gsbase.subcode; /* While we initially set subcode to the TREE_CODE of the rhs for GIMPLE_SINGLE_RHS assigns we do not update that subcode to stay in sync when we rewrite stmts into SSA form or do SSA propagations. */ if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS) code = TREE_CODE (gimple_assign_rhs1 (gs)); return code; } /* Set CODE to be the code for the expression computed on the RHS of assignment S. */ static inline void gimple_assign_set_rhs_code (gimple s, enum tree_code code) { GIMPLE_CHECK (s, GIMPLE_ASSIGN); s->gsbase.subcode = code; } /* Return the gimple rhs class of the code of the expression computed on the rhs of assignment statement GS. This will never return GIMPLE_INVALID_RHS. */ static inline enum gimple_rhs_class gimple_assign_rhs_class (const_gimple gs) { return get_gimple_rhs_class (gimple_assign_rhs_code (gs)); } /* Return true if GS is an assignment with a singleton RHS, i.e., there is no operator associated with the assignment itself. Unlike gimple_assign_copy_p, this predicate returns true for any RHS operand, including those that perform an operation and do not have the semantics of a copy, such as COND_EXPR. */ static inline bool gimple_assign_single_p (gimple gs) { return (is_gimple_assign (gs) && gimple_assign_rhs_class (gs) == GIMPLE_SINGLE_RHS); } /* Return true if GS performs a store to its lhs. */ static inline bool gimple_store_p (gimple gs) { tree lhs = gimple_get_lhs (gs); return lhs && !is_gimple_reg (lhs); } /* Return true if GS is an assignment that loads from its rhs1. */ static inline bool gimple_assign_load_p (gimple gs) { tree rhs; if (!gimple_assign_single_p (gs)) return false; rhs = gimple_assign_rhs1 (gs); if (TREE_CODE (rhs) == WITH_SIZE_EXPR) return true; rhs = get_base_address (rhs); return (DECL_P (rhs) || TREE_CODE (rhs) == MEM_REF || TREE_CODE (rhs) == TARGET_MEM_REF); } /* Return true if S is a type-cast assignment. 
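   For example (a hedged sketch), a pass looking for a plain conversion
   statement `lhs = (type) rhs' might use the predicate below together with
   the RHS accessors above:

     if (gimple_assign_cast_p (stmt))
       {
         tree src = gimple_assign_rhs1 (stmt);
         ...
       }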
*/ static inline bool gimple_assign_cast_p (gimple s) { if (is_gimple_assign (s)) { enum tree_code sc = gimple_assign_rhs_code (s); return CONVERT_EXPR_CODE_P (sc) || sc == VIEW_CONVERT_EXPR || sc == FIX_TRUNC_EXPR; } return false; } /* Return true if S is a clobber statement. */ static inline bool gimple_clobber_p (gimple s) { return gimple_assign_single_p (s) && TREE_CLOBBER_P (gimple_assign_rhs1 (s)); } /* Return true if GS is a GIMPLE_CALL. */ static inline bool is_gimple_call (const_gimple gs) { return gimple_code (gs) == GIMPLE_CALL; } /* Return the LHS of call statement GS. */ static inline tree gimple_call_lhs (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op (gs, 0); } /* Return a pointer to the LHS of call statement GS. */ static inline tree * gimple_call_lhs_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op_ptr (gs, 0); } /* Set LHS to be the LHS operand of call statement GS. */ static inline void gimple_call_set_lhs (gimple gs, tree lhs) { GIMPLE_CHECK (gs, GIMPLE_CALL); gimple_set_op (gs, 0, lhs); if (lhs && TREE_CODE (lhs) == SSA_NAME) SSA_NAME_DEF_STMT (lhs) = gs; } /* Return true if call GS calls an internal-only function, as enumerated by internal_fn. */ static inline bool gimple_call_internal_p (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return (gs->gsbase.subcode & GF_CALL_INTERNAL) != 0; } /* Return the target of internal call GS. */ static inline enum internal_fn gimple_call_internal_fn (const_gimple gs) { gcc_gimple_checking_assert (gimple_call_internal_p (gs)); return gs->gimple_call.u.internal_fn; } /* Return the function type of the function called by GS. */ static inline tree gimple_call_fntype (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); if (gimple_call_internal_p (gs)) return NULL_TREE; return gs->gimple_call.u.fntype; } /* Set the type of the function called by GS to FNTYPE. */ static inline void gimple_call_set_fntype (gimple gs, tree fntype) { GIMPLE_CHECK (gs, GIMPLE_CALL); gcc_gimple_checking_assert (!gimple_call_internal_p (gs)); gs->gimple_call.u.fntype = fntype; } /* Return the tree node representing the function called by call statement GS. */ static inline tree gimple_call_fn (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op (gs, 1); } /* Return a pointer to the tree node representing the function called by call statement GS. */ static inline tree * gimple_call_fn_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op_ptr (gs, 1); } /* Set FN to be the function called by call statement GS. */ static inline void gimple_call_set_fn (gimple gs, tree fn) { GIMPLE_CHECK (gs, GIMPLE_CALL); gcc_gimple_checking_assert (!gimple_call_internal_p (gs)); gimple_set_op (gs, 1, fn); } /* Set FNDECL to be the function called by call statement GS. */ static inline void gimple_call_set_fndecl (gimple gs, tree decl) { GIMPLE_CHECK (gs, GIMPLE_CALL); gcc_gimple_checking_assert (!gimple_call_internal_p (gs)); gimple_set_op (gs, 1, build_fold_addr_expr_loc (gimple_location (gs), decl)); } /* Set internal function FN to be the function called by call statement GS. */ static inline void gimple_call_set_internal_fn (gimple gs, enum internal_fn fn) { GIMPLE_CHECK (gs, GIMPLE_CALL); gcc_gimple_checking_assert (gimple_call_internal_p (gs)); gs->gimple_call.u.internal_fn = fn; } /* Given a valid GIMPLE_CALL function address return the FUNCTION_DECL associated with the callee if known. Otherwise return NULL_TREE. 
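   A typical (hypothetical) use of the function below and its
   gimple_call_fndecl wrapper is to ask whether a call statement targets a
   known declaration; DECL_BUILT_IN here is the usual tree-level macro and
   only serves as an illustration:

     tree fndecl = gimple_call_fndecl (call_stmt);
     if (fndecl != NULL_TREE && DECL_BUILT_IN (fndecl))
       ...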
*/ static inline tree gimple_call_addr_fndecl (const_tree fn) { if (fn && TREE_CODE (fn) == ADDR_EXPR) { tree fndecl = TREE_OPERAND (fn, 0); if (TREE_CODE (fndecl) == MEM_REF && TREE_CODE (TREE_OPERAND (fndecl, 0)) == ADDR_EXPR && integer_zerop (TREE_OPERAND (fndecl, 1))) fndecl = TREE_OPERAND (TREE_OPERAND (fndecl, 0), 0); if (TREE_CODE (fndecl) == FUNCTION_DECL) return fndecl; } return NULL_TREE; } /* If a given GIMPLE_CALL's callee is a FUNCTION_DECL, return it. Otherwise return NULL. This function is analogous to get_callee_fndecl in tree land. */ static inline tree gimple_call_fndecl (const_gimple gs) { return gimple_call_addr_fndecl (gimple_call_fn (gs)); } /* Return the type returned by call statement GS. */ static inline tree gimple_call_return_type (const_gimple gs) { tree type = gimple_call_fntype (gs); if (type == NULL_TREE) return TREE_TYPE (gimple_call_lhs (gs)); /* The type returned by a function is the type of its function type. */ return TREE_TYPE (type); } /* Return the static chain for call statement GS. */ static inline tree gimple_call_chain (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op (gs, 2); } /* Return a pointer to the static chain for call statement GS. */ static inline tree * gimple_call_chain_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op_ptr (gs, 2); } /* Set CHAIN to be the static chain for call statement GS. */ static inline void gimple_call_set_chain (gimple gs, tree chain) { GIMPLE_CHECK (gs, GIMPLE_CALL); gimple_set_op (gs, 2, chain); } /* Return the number of arguments used by call statement GS. */ static inline unsigned gimple_call_num_args (const_gimple gs) { unsigned num_ops; GIMPLE_CHECK (gs, GIMPLE_CALL); num_ops = gimple_num_ops (gs); return num_ops - 3; } /* Return the argument at position INDEX for call statement GS. */ static inline tree gimple_call_arg (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op (gs, index + 3); } /* Return a pointer to the argument at position INDEX for call statement GS. */ static inline tree * gimple_call_arg_ptr (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op_ptr (gs, index + 3); } /* Set ARG to be the argument at position INDEX for call statement GS. */ static inline void gimple_call_set_arg (gimple gs, unsigned index, tree arg) { GIMPLE_CHECK (gs, GIMPLE_CALL); gimple_set_op (gs, index + 3, arg); } /* If TAIL_P is true, mark call statement S as being a tail call (i.e., a call just before the exit of a function). These calls are candidate for tail call optimization. */ static inline void gimple_call_set_tail (gimple s, bool tail_p) { GIMPLE_CHECK (s, GIMPLE_CALL); if (tail_p) s->gsbase.subcode |= GF_CALL_TAILCALL; else s->gsbase.subcode &= ~GF_CALL_TAILCALL; } /* Return true if GIMPLE_CALL S is marked as a tail call. */ static inline bool gimple_call_tail_p (gimple s) { GIMPLE_CHECK (s, GIMPLE_CALL); return (s->gsbase.subcode & GF_CALL_TAILCALL) != 0; } /* If RETURN_SLOT_OPT_P is true mark GIMPLE_CALL S as valid for return slot optimization. This transformation uses the target of the call expansion as the return slot for calls that return in memory. */ static inline void gimple_call_set_return_slot_opt (gimple s, bool return_slot_opt_p) { GIMPLE_CHECK (s, GIMPLE_CALL); if (return_slot_opt_p) s->gsbase.subcode |= GF_CALL_RETURN_SLOT_OPT; else s->gsbase.subcode &= ~GF_CALL_RETURN_SLOT_OPT; } /* Return true if S is marked for return slot optimization. 
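   This predicate pairs with gimple_call_set_return_slot_opt above in the
   usual set-then-query pattern; as a hedged sketch (hypothetical;
   `call_stmt' is a GIMPLE_CALL that returns in memory):

     gimple_call_set_return_slot_opt (call_stmt, true);
     if (gimple_call_return_slot_opt_p (call_stmt))
       ...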
*/ static inline bool gimple_call_return_slot_opt_p (gimple s) { GIMPLE_CHECK (s, GIMPLE_CALL); return (s->gsbase.subcode & GF_CALL_RETURN_SLOT_OPT) != 0; } /* If FROM_THUNK_P is true, mark GIMPLE_CALL S as being the jump from a thunk to the thunked-to function. */ static inline void gimple_call_set_from_thunk (gimple s, bool from_thunk_p) { GIMPLE_CHECK (s, GIMPLE_CALL); if (from_thunk_p) s->gsbase.subcode |= GF_CALL_FROM_THUNK; else s->gsbase.subcode &= ~GF_CALL_FROM_THUNK; } /* Return true if GIMPLE_CALL S is a jump from a thunk. */ static inline bool gimple_call_from_thunk_p (gimple s) { GIMPLE_CHECK (s, GIMPLE_CALL); return (s->gsbase.subcode & GF_CALL_FROM_THUNK) != 0; } /* If PASS_ARG_PACK_P is true, GIMPLE_CALL S is a stdarg call that needs the argument pack in its argument list. */ static inline void gimple_call_set_va_arg_pack (gimple s, bool pass_arg_pack_p) { GIMPLE_CHECK (s, GIMPLE_CALL); if (pass_arg_pack_p) s->gsbase.subcode |= GF_CALL_VA_ARG_PACK; else s->gsbase.subcode &= ~GF_CALL_VA_ARG_PACK; } /* Return true if GIMPLE_CALL S is a stdarg call that needs the argument pack in its argument list. */ static inline bool gimple_call_va_arg_pack_p (gimple s) { GIMPLE_CHECK (s, GIMPLE_CALL); return (s->gsbase.subcode & GF_CALL_VA_ARG_PACK) != 0; } /* Return true if S is a noreturn call. */ static inline bool gimple_call_noreturn_p (gimple s) { GIMPLE_CHECK (s, GIMPLE_CALL); return (gimple_call_flags (s) & ECF_NORETURN) != 0; } /* If NOTHROW_P is true, GIMPLE_CALL S is a call that is known to not throw even if the called function can throw in other cases. */ static inline void gimple_call_set_nothrow (gimple s, bool nothrow_p) { GIMPLE_CHECK (s, GIMPLE_CALL); if (nothrow_p) s->gsbase.subcode |= GF_CALL_NOTHROW; else s->gsbase.subcode &= ~GF_CALL_NOTHROW; } /* Return true if S is a nothrow call. */ static inline bool gimple_call_nothrow_p (gimple s) { GIMPLE_CHECK (s, GIMPLE_CALL); return (gimple_call_flags (s) & ECF_NOTHROW) != 0; } /* If FOR_VAR is true, GIMPLE_CALL S is a call to builtin_alloca that is known to be emitted for VLA objects. Those are wrapped by stack_save/stack_restore calls and hence can't lead to unbounded stack growth even when they occur in loops. */ static inline void gimple_call_set_alloca_for_var (gimple s, bool for_var) { GIMPLE_CHECK (s, GIMPLE_CALL); if (for_var) s->gsbase.subcode |= GF_CALL_ALLOCA_FOR_VAR; else s->gsbase.subcode &= ~GF_CALL_ALLOCA_FOR_VAR; } /* Return true of S is a call to builtin_alloca emitted for VLA objects. */ static inline bool gimple_call_alloca_for_var_p (gimple s) { GIMPLE_CHECK (s, GIMPLE_CALL); return (s->gsbase.subcode & GF_CALL_ALLOCA_FOR_VAR) != 0; } /* Copy all the GF_CALL_* flags from ORIG_CALL to DEST_CALL. */ static inline void gimple_call_copy_flags (gimple dest_call, gimple orig_call) { GIMPLE_CHECK (dest_call, GIMPLE_CALL); GIMPLE_CHECK (orig_call, GIMPLE_CALL); dest_call->gsbase.subcode = orig_call->gsbase.subcode; } /* Return a pointer to the points-to solution for the set of call-used variables of the call CALL. */ static inline struct pt_solution * gimple_call_use_set (gimple call) { GIMPLE_CHECK (call, GIMPLE_CALL); return &call->gimple_call.call_used; } /* Return a pointer to the points-to solution for the set of call-used variables of the call CALL. */ static inline struct pt_solution * gimple_call_clobber_set (gimple call) { GIMPLE_CHECK (call, GIMPLE_CALL); return &call->gimple_call.call_clobbered; } /* Returns true if this is a GIMPLE_ASSIGN or a GIMPLE_CALL with a non-NULL lhs. 
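   As a usage sketch (hypothetical; `stmt' is an arbitrary statement), the
   predicate below is what lets a caller fetch the destination safely with
   the class-specific accessors:

     if (gimple_has_lhs (stmt))
       {
         tree lhs = is_gimple_call (stmt) ? gimple_call_lhs (stmt)
                                          : gimple_assign_lhs (stmt);
         ...
       }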
*/ static inline bool gimple_has_lhs (gimple stmt) { return (is_gimple_assign (stmt) || (is_gimple_call (stmt) && gimple_call_lhs (stmt) != NULL_TREE)); } /* Return the code of the predicate computed by conditional statement GS. */ static inline enum tree_code gimple_cond_code (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_COND); return (enum tree_code) gs->gsbase.subcode; } /* Set CODE to be the predicate code for the conditional statement GS. */ static inline void gimple_cond_set_code (gimple gs, enum tree_code code) { GIMPLE_CHECK (gs, GIMPLE_COND); gs->gsbase.subcode = code; } /* Return the LHS of the predicate computed by conditional statement GS. */ static inline tree gimple_cond_lhs (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_COND); return gimple_op (gs, 0); } /* Return the pointer to the LHS of the predicate computed by conditional statement GS. */ static inline tree * gimple_cond_lhs_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_COND); return gimple_op_ptr (gs, 0); } /* Set LHS to be the LHS operand of the predicate computed by conditional statement GS. */ static inline void gimple_cond_set_lhs (gimple gs, tree lhs) { GIMPLE_CHECK (gs, GIMPLE_COND); gimple_set_op (gs, 0, lhs); } /* Return the RHS operand of the predicate computed by conditional GS. */ static inline tree gimple_cond_rhs (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_COND); return gimple_op (gs, 1); } /* Return the pointer to the RHS operand of the predicate computed by conditional GS. */ static inline tree * gimple_cond_rhs_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_COND); return gimple_op_ptr (gs, 1); } /* Set RHS to be the RHS operand of the predicate computed by conditional statement GS. */ static inline void gimple_cond_set_rhs (gimple gs, tree rhs) { GIMPLE_CHECK (gs, GIMPLE_COND); gimple_set_op (gs, 1, rhs); } /* Return the label used by conditional statement GS when its predicate evaluates to true. */ static inline tree gimple_cond_true_label (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_COND); return gimple_op (gs, 2); } /* Set LABEL to be the label used by conditional statement GS when its predicate evaluates to true. */ static inline void gimple_cond_set_true_label (gimple gs, tree label) { GIMPLE_CHECK (gs, GIMPLE_COND); gimple_set_op (gs, 2, label); } /* Set LABEL to be the label used by conditional statement GS when its predicate evaluates to false. */ static inline void gimple_cond_set_false_label (gimple gs, tree label) { GIMPLE_CHECK (gs, GIMPLE_COND); gimple_set_op (gs, 3, label); } /* Return the label used by conditional statement GS when its predicate evaluates to false. */ static inline tree gimple_cond_false_label (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_COND); return gimple_op (gs, 3); } /* Set the conditional COND_STMT to be of the form 'if (1 == 0)'. */ static inline void gimple_cond_make_false (gimple gs) { gimple_cond_set_lhs (gs, boolean_true_node); gimple_cond_set_rhs (gs, boolean_false_node); gs->gsbase.subcode = EQ_EXPR; } /* Set the conditional COND_STMT to be of the form 'if (1 == 1)'. 
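   For instance (a hedged sketch), a pass that has proved the predicate of
   `cond_stmt' always holds can degenerate the condition with the function
   below and let later CFG cleanup remove the dead edge:

     gimple_cond_make_true (cond_stmt);
     update_stmt (cond_stmt);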
*/ static inline void gimple_cond_make_true (gimple gs) { gimple_cond_set_lhs (gs, boolean_true_node); gimple_cond_set_rhs (gs, boolean_true_node); gs->gsbase.subcode = EQ_EXPR; } /* Check if conditional statemente GS is of the form 'if (1 == 1)', 'if (0 == 0)', 'if (1 != 0)' or 'if (0 != 1)' */ static inline bool gimple_cond_true_p (const_gimple gs) { tree lhs = gimple_cond_lhs (gs); tree rhs = gimple_cond_rhs (gs); enum tree_code code = gimple_cond_code (gs); if (lhs != boolean_true_node && lhs != boolean_false_node) return false; if (rhs != boolean_true_node && rhs != boolean_false_node) return false; if (code == NE_EXPR && lhs != rhs) return true; if (code == EQ_EXPR && lhs == rhs) return true; return false; } /* Check if conditional statement GS is of the form 'if (1 != 1)', 'if (0 != 0)', 'if (1 == 0)' or 'if (0 == 1)' */ static inline bool gimple_cond_false_p (const_gimple gs) { tree lhs = gimple_cond_lhs (gs); tree rhs = gimple_cond_rhs (gs); enum tree_code code = gimple_cond_code (gs); if (lhs != boolean_true_node && lhs != boolean_false_node) return false; if (rhs != boolean_true_node && rhs != boolean_false_node) return false; if (code == NE_EXPR && lhs == rhs) return true; if (code == EQ_EXPR && lhs != rhs) return true; return false; } /* Check if conditional statement GS is of the form 'if (var != 0)' or 'if (var == 1)' */ static inline bool gimple_cond_single_var_p (gimple gs) { if (gimple_cond_code (gs) == NE_EXPR && gimple_cond_rhs (gs) == boolean_false_node) return true; if (gimple_cond_code (gs) == EQ_EXPR && gimple_cond_rhs (gs) == boolean_true_node) return true; return false; } /* Set the code, LHS and RHS of GIMPLE_COND STMT from CODE, LHS and RHS. */ static inline void gimple_cond_set_condition (gimple stmt, enum tree_code code, tree lhs, tree rhs) { gimple_cond_set_code (stmt, code); gimple_cond_set_lhs (stmt, lhs); gimple_cond_set_rhs (stmt, rhs); } /* Return the LABEL_DECL node used by GIMPLE_LABEL statement GS. */ static inline tree gimple_label_label (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_LABEL); return gimple_op (gs, 0); } /* Set LABEL to be the LABEL_DECL node used by GIMPLE_LABEL statement GS. */ static inline void gimple_label_set_label (gimple gs, tree label) { GIMPLE_CHECK (gs, GIMPLE_LABEL); gimple_set_op (gs, 0, label); } /* Return the destination of the unconditional jump GS. */ static inline tree gimple_goto_dest (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_GOTO); return gimple_op (gs, 0); } /* Set DEST to be the destination of the unconditonal jump GS. */ static inline void gimple_goto_set_dest (gimple gs, tree dest) { GIMPLE_CHECK (gs, GIMPLE_GOTO); gimple_set_op (gs, 0, dest); } /* Return the variables declared in the GIMPLE_BIND statement GS. */ static inline tree gimple_bind_vars (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_BIND); return gs->gimple_bind.vars; } /* Set VARS to be the set of variables declared in the GIMPLE_BIND statement GS. */ static inline void gimple_bind_set_vars (gimple gs, tree vars) { GIMPLE_CHECK (gs, GIMPLE_BIND); gs->gimple_bind.vars = vars; } /* Append VARS to the set of variables declared in the GIMPLE_BIND statement GS. */ static inline void gimple_bind_append_vars (gimple gs, tree vars) { GIMPLE_CHECK (gs, GIMPLE_BIND); gs->gimple_bind.vars = chainon (gs->gimple_bind.vars, vars); } static inline gimple_seq * gimple_bind_body_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_BIND); return &gs->gimple_bind.body; } /* Return the GIMPLE sequence contained in the GIMPLE_BIND statement GS. 
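   A minimal sketch of the bind-body accessors (hypothetical; `bind' is a
   GIMPLE_BIND and `stmt' a statement to be placed inside it):

     gimple_seq body = gimple_bind_body (bind);
     if (gimple_seq_empty_p (body))
       gimple_bind_add_stmt (bind, stmt);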
*/ static inline gimple_seq gimple_bind_body (gimple gs) { return *gimple_bind_body_ptr (gs); } /* Set SEQ to be the GIMPLE sequence contained in the GIMPLE_BIND statement GS. */ static inline void gimple_bind_set_body (gimple gs, gimple_seq seq) { GIMPLE_CHECK (gs, GIMPLE_BIND); gs->gimple_bind.body = seq; } /* Append a statement to the end of a GIMPLE_BIND's body. */ static inline void gimple_bind_add_stmt (gimple gs, gimple stmt) { GIMPLE_CHECK (gs, GIMPLE_BIND); gimple_seq_add_stmt (&gs->gimple_bind.body, stmt); } /* Append a sequence of statements to the end of a GIMPLE_BIND's body. */ static inline void gimple_bind_add_seq (gimple gs, gimple_seq seq) { GIMPLE_CHECK (gs, GIMPLE_BIND); gimple_seq_add_seq (&gs->gimple_bind.body, seq); } /* Return the TREE_BLOCK node associated with GIMPLE_BIND statement GS. This is analogous to the BIND_EXPR_BLOCK field in trees. */ static inline tree gimple_bind_block (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_BIND); return gs->gimple_bind.block; } /* Set BLOCK to be the TREE_BLOCK node associated with GIMPLE_BIND statement GS. */ static inline void gimple_bind_set_block (gimple gs, tree block) { GIMPLE_CHECK (gs, GIMPLE_BIND); gcc_gimple_checking_assert (block == NULL_TREE || TREE_CODE (block) == BLOCK); gs->gimple_bind.block = block; } /* Return the number of input operands for GIMPLE_ASM GS. */ static inline unsigned gimple_asm_ninputs (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return gs->gimple_asm.ni; } /* Return the number of output operands for GIMPLE_ASM GS. */ static inline unsigned gimple_asm_noutputs (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return gs->gimple_asm.no; } /* Return the number of clobber operands for GIMPLE_ASM GS. */ static inline unsigned gimple_asm_nclobbers (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return gs->gimple_asm.nc; } /* Return the number of label operands for GIMPLE_ASM GS. */ static inline unsigned gimple_asm_nlabels (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return gs->gimple_asm.nl; } /* Return input operand INDEX of GIMPLE_ASM GS. */ static inline tree gimple_asm_input_op (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index < gs->gimple_asm.ni); return gimple_op (gs, index + gs->gimple_asm.no); } /* Return a pointer to input operand INDEX of GIMPLE_ASM GS. */ static inline tree * gimple_asm_input_op_ptr (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index < gs->gimple_asm.ni); return gimple_op_ptr (gs, index + gs->gimple_asm.no); } /* Set IN_OP to be input operand INDEX in GIMPLE_ASM GS. */ static inline void gimple_asm_set_input_op (gimple gs, unsigned index, tree in_op) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index < gs->gimple_asm.ni && TREE_CODE (in_op) == TREE_LIST); gimple_set_op (gs, index + gs->gimple_asm.no, in_op); } /* Return output operand INDEX of GIMPLE_ASM GS. */ static inline tree gimple_asm_output_op (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index < gs->gimple_asm.no); return gimple_op (gs, index); } /* Return a pointer to output operand INDEX of GIMPLE_ASM GS. */ static inline tree * gimple_asm_output_op_ptr (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index < gs->gimple_asm.no); return gimple_op_ptr (gs, index); } /* Set OUT_OP to be output operand INDEX in GIMPLE_ASM GS. 
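   As an illustrative sketch (hypothetical; `asm_stmt' is a GIMPLE_ASM),
   the count and accessor pairs above are used together, with TREE_VALUE
   extracting the operand from each TREE_LIST link:

     unsigned i;
     for (i = 0; i < gimple_asm_noutputs (asm_stmt); i++)
       {
         tree link = gimple_asm_output_op (asm_stmt, i);
         tree op = TREE_VALUE (link);
         ...
       }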
*/ static inline void gimple_asm_set_output_op (gimple gs, unsigned index, tree out_op) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index < gs->gimple_asm.no && TREE_CODE (out_op) == TREE_LIST); gimple_set_op (gs, index, out_op); } /* Return clobber operand INDEX of GIMPLE_ASM GS. */ static inline tree gimple_asm_clobber_op (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index < gs->gimple_asm.nc); return gimple_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no); } /* Set CLOBBER_OP to be clobber operand INDEX in GIMPLE_ASM GS. */ static inline void gimple_asm_set_clobber_op (gimple gs, unsigned index, tree clobber_op) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index < gs->gimple_asm.nc && TREE_CODE (clobber_op) == TREE_LIST); gimple_set_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no, clobber_op); } /* Return label operand INDEX of GIMPLE_ASM GS. */ static inline tree gimple_asm_label_op (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index < gs->gimple_asm.nl); return gimple_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.nc); } /* Set LABEL_OP to be label operand INDEX in GIMPLE_ASM GS. */ static inline void gimple_asm_set_label_op (gimple gs, unsigned index, tree label_op) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index < gs->gimple_asm.nl && TREE_CODE (label_op) == TREE_LIST); gimple_set_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.nc, label_op); } /* Return the string representing the assembly instruction in GIMPLE_ASM GS. */ static inline const char * gimple_asm_string (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return gs->gimple_asm.string; } /* Return true if GS is an asm statement marked volatile. */ static inline bool gimple_asm_volatile_p (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return (gs->gsbase.subcode & GF_ASM_VOLATILE) != 0; } /* If VOLATLE_P is true, mark asm statement GS as volatile. */ static inline void gimple_asm_set_volatile (gimple gs, bool volatile_p) { GIMPLE_CHECK (gs, GIMPLE_ASM); if (volatile_p) gs->gsbase.subcode |= GF_ASM_VOLATILE; else gs->gsbase.subcode &= ~GF_ASM_VOLATILE; } /* If INPUT_P is true, mark asm GS as an ASM_INPUT. */ static inline void gimple_asm_set_input (gimple gs, bool input_p) { GIMPLE_CHECK (gs, GIMPLE_ASM); if (input_p) gs->gsbase.subcode |= GF_ASM_INPUT; else gs->gsbase.subcode &= ~GF_ASM_INPUT; } /* Return true if asm GS is an ASM_INPUT. */ static inline bool gimple_asm_input_p (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return (gs->gsbase.subcode & GF_ASM_INPUT) != 0; } /* Return the types handled by GIMPLE_CATCH statement GS. */ static inline tree gimple_catch_types (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CATCH); return gs->gimple_catch.types; } /* Return a pointer to the types handled by GIMPLE_CATCH statement GS. */ static inline tree * gimple_catch_types_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CATCH); return &gs->gimple_catch.types; } /* Return a pointer to the GIMPLE sequence representing the body of the handler of GIMPLE_CATCH statement GS. */ static inline gimple_seq * gimple_catch_handler_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CATCH); return &gs->gimple_catch.handler; } /* Return the GIMPLE sequence representing the body of the handler of GIMPLE_CATCH statement GS. */ static inline gimple_seq gimple_catch_handler (gimple gs) { return *gimple_catch_handler_ptr (gs); } /* Set T to be the set of types handled by GIMPLE_CATCH GS. 
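   A hedged sketch of the catch accessors (hypothetical; `catch_stmt' is a
   GIMPLE_CATCH): callers usually look at the matched types and the handler
   body side by side:

     tree types = gimple_catch_types (catch_stmt);
     gimple_seq handler = gimple_catch_handler (catch_stmt);
     ...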
*/ static inline void gimple_catch_set_types (gimple gs, tree t) { GIMPLE_CHECK (gs, GIMPLE_CATCH); gs->gimple_catch.types = t; } /* Set HANDLER to be the body of GIMPLE_CATCH GS. */ static inline void gimple_catch_set_handler (gimple gs, gimple_seq handler) { GIMPLE_CHECK (gs, GIMPLE_CATCH); gs->gimple_catch.handler = handler; } /* Return the types handled by GIMPLE_EH_FILTER statement GS. */ static inline tree gimple_eh_filter_types (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_EH_FILTER); return gs->gimple_eh_filter.types; } /* Return a pointer to the types handled by GIMPLE_EH_FILTER statement GS. */ static inline tree * gimple_eh_filter_types_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_EH_FILTER); return &gs->gimple_eh_filter.types; } /* Return a pointer to the sequence of statement to execute when GIMPLE_EH_FILTER statement fails. */ static inline gimple_seq * gimple_eh_filter_failure_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_EH_FILTER); return &gs->gimple_eh_filter.failure; } /* Return the sequence of statement to execute when GIMPLE_EH_FILTER statement fails. */ static inline gimple_seq gimple_eh_filter_failure (gimple gs) { return *gimple_eh_filter_failure_ptr (gs); } /* Set TYPES to be the set of types handled by GIMPLE_EH_FILTER GS. */ static inline void gimple_eh_filter_set_types (gimple gs, tree types) { GIMPLE_CHECK (gs, GIMPLE_EH_FILTER); gs->gimple_eh_filter.types = types; } /* Set FAILURE to be the sequence of statements to execute on failure for GIMPLE_EH_FILTER GS. */ static inline void gimple_eh_filter_set_failure (gimple gs, gimple_seq failure) { GIMPLE_CHECK (gs, GIMPLE_EH_FILTER); gs->gimple_eh_filter.failure = failure; } /* Get the function decl to be called by the MUST_NOT_THROW region. */ static inline tree gimple_eh_must_not_throw_fndecl (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW); return gs->gimple_eh_mnt.fndecl; } /* Set the function decl to be called by GS to DECL. */ static inline void gimple_eh_must_not_throw_set_fndecl (gimple gs, tree decl) { GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW); gs->gimple_eh_mnt.fndecl = decl; } /* GIMPLE_EH_ELSE accessors. */ static inline gimple_seq * gimple_eh_else_n_body_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_EH_ELSE); return &gs->gimple_eh_else.n_body; } static inline gimple_seq gimple_eh_else_n_body (gimple gs) { return *gimple_eh_else_n_body_ptr (gs); } static inline gimple_seq * gimple_eh_else_e_body_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_EH_ELSE); return &gs->gimple_eh_else.e_body; } static inline gimple_seq gimple_eh_else_e_body (gimple gs) { return *gimple_eh_else_e_body_ptr (gs); } static inline void gimple_eh_else_set_n_body (gimple gs, gimple_seq seq) { GIMPLE_CHECK (gs, GIMPLE_EH_ELSE); gs->gimple_eh_else.n_body = seq; } static inline void gimple_eh_else_set_e_body (gimple gs, gimple_seq seq) { GIMPLE_CHECK (gs, GIMPLE_EH_ELSE); gs->gimple_eh_else.e_body = seq; } /* GIMPLE_TRY accessors. */ /* Return the kind of try block represented by GIMPLE_TRY GS. This is either GIMPLE_TRY_CATCH or GIMPLE_TRY_FINALLY. */ static inline enum gimple_try_flags gimple_try_kind (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_TRY); return (enum gimple_try_flags) (gs->gsbase.subcode & GIMPLE_TRY_KIND); } /* Set the kind of try block represented by GIMPLE_TRY GS. 
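   For example (a hedged sketch), exception lowering distinguishes the two
   kinds before deciding what to do with the cleanup sequence:

     if (gimple_try_kind (try_stmt) == GIMPLE_TRY_FINALLY)
       {
         gimple_seq cleanup = gimple_try_cleanup (try_stmt);
         ...
       }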
*/ static inline void gimple_try_set_kind (gimple gs, enum gimple_try_flags kind) { GIMPLE_CHECK (gs, GIMPLE_TRY); gcc_gimple_checking_assert (kind == GIMPLE_TRY_CATCH || kind == GIMPLE_TRY_FINALLY); if (gimple_try_kind (gs) != kind) gs->gsbase.subcode = (unsigned int) kind; } /* Return the GIMPLE_TRY_CATCH_IS_CLEANUP flag. */ static inline bool gimple_try_catch_is_cleanup (const_gimple gs) { gcc_gimple_checking_assert (gimple_try_kind (gs) == GIMPLE_TRY_CATCH); return (gs->gsbase.subcode & GIMPLE_TRY_CATCH_IS_CLEANUP) != 0; } /* Return a pointer to the sequence of statements used as the body for GIMPLE_TRY GS. */ static inline gimple_seq * gimple_try_eval_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_TRY); return &gs->gimple_try.eval; } /* Return the sequence of statements used as the body for GIMPLE_TRY GS. */ static inline gimple_seq gimple_try_eval (gimple gs) { return *gimple_try_eval_ptr (gs); } /* Return a pointer to the sequence of statements used as the cleanup body for GIMPLE_TRY GS. */ static inline gimple_seq * gimple_try_cleanup_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_TRY); return &gs->gimple_try.cleanup; } /* Return the sequence of statements used as the cleanup body for GIMPLE_TRY GS. */ static inline gimple_seq gimple_try_cleanup (gimple gs) { return *gimple_try_cleanup_ptr (gs); } /* Set the GIMPLE_TRY_CATCH_IS_CLEANUP flag. */ static inline void gimple_try_set_catch_is_cleanup (gimple g, bool catch_is_cleanup) { gcc_gimple_checking_assert (gimple_try_kind (g) == GIMPLE_TRY_CATCH); if (catch_is_cleanup) g->gsbase.subcode |= GIMPLE_TRY_CATCH_IS_CLEANUP; else g->gsbase.subcode &= ~GIMPLE_TRY_CATCH_IS_CLEANUP; } /* Set EVAL to be the sequence of statements to use as the body for GIMPLE_TRY GS. */ static inline void gimple_try_set_eval (gimple gs, gimple_seq eval) { GIMPLE_CHECK (gs, GIMPLE_TRY); gs->gimple_try.eval = eval; } /* Set CLEANUP to be the sequence of statements to use as the cleanup body for GIMPLE_TRY GS. */ static inline void gimple_try_set_cleanup (gimple gs, gimple_seq cleanup) { GIMPLE_CHECK (gs, GIMPLE_TRY); gs->gimple_try.cleanup = cleanup; } /* Return a pointer to the cleanup sequence for cleanup statement GS. */ static inline gimple_seq * gimple_wce_cleanup_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR); return &gs->gimple_wce.cleanup; } /* Return the cleanup sequence for cleanup statement GS. */ static inline gimple_seq gimple_wce_cleanup (gimple gs) { return *gimple_wce_cleanup_ptr (gs); } /* Set CLEANUP to be the cleanup sequence for GS. */ static inline void gimple_wce_set_cleanup (gimple gs, gimple_seq cleanup) { GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR); gs->gimple_wce.cleanup = cleanup; } /* Return the CLEANUP_EH_ONLY flag for a WCE tuple. */ static inline bool gimple_wce_cleanup_eh_only (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR); return gs->gsbase.subcode != 0; } /* Set the CLEANUP_EH_ONLY flag for a WCE tuple. */ static inline void gimple_wce_set_cleanup_eh_only (gimple gs, bool eh_only_p) { GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR); gs->gsbase.subcode = (unsigned int) eh_only_p; } /* Return the maximum number of arguments supported by GIMPLE_PHI GS. */ static inline unsigned gimple_phi_capacity (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_PHI); return gs->gimple_phi.capacity; } /* Return the number of arguments in GIMPLE_PHI GS. This must always be exactly the number of incoming edges for the basic block holding GS. 
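   A sketch of the usual iteration pattern (hypothetical; `phi' is a
   GIMPLE_PHI), using only the accessors defined in this block:

     unsigned i;
     for (i = 0; i < gimple_phi_num_args (phi); i++)
       {
         struct phi_arg_d *arg = gimple_phi_arg (phi, i);
         ...
       }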
*/ static inline unsigned gimple_phi_num_args (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_PHI); return gs->gimple_phi.nargs; } /* Return the SSA name created by GIMPLE_PHI GS. */ static inline tree gimple_phi_result (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_PHI); return gs->gimple_phi.result; } /* Return a pointer to the SSA name created by GIMPLE_PHI GS. */ static inline tree * gimple_phi_result_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_PHI); return &gs->gimple_phi.result; } /* Set RESULT to be the SSA name created by GIMPLE_PHI GS. */ static inline void gimple_phi_set_result (gimple gs, tree result) { GIMPLE_CHECK (gs, GIMPLE_PHI); gs->gimple_phi.result = result; if (result && TREE_CODE (result) == SSA_NAME) SSA_NAME_DEF_STMT (result) = gs; } /* Return the PHI argument corresponding to incoming edge INDEX for GIMPLE_PHI GS. */ static inline struct phi_arg_d * gimple_phi_arg (gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_PHI); gcc_gimple_checking_assert (index <= gs->gimple_phi.capacity); return &(gs->gimple_phi.args[index]); } /* Set PHIARG to be the argument corresponding to incoming edge INDEX for GIMPLE_PHI GS. */ static inline void gimple_phi_set_arg (gimple gs, unsigned index, struct phi_arg_d * phiarg) { GIMPLE_CHECK (gs, GIMPLE_PHI); gcc_gimple_checking_assert (index <= gs->gimple_phi.nargs); gs->gimple_phi.args[index] = *phiarg; } /* Return the region number for GIMPLE_RESX GS. */ static inline int gimple_resx_region (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_RESX); return gs->gimple_eh_ctrl.region; } /* Set REGION to be the region number for GIMPLE_RESX GS. */ static inline void gimple_resx_set_region (gimple gs, int region) { GIMPLE_CHECK (gs, GIMPLE_RESX); gs->gimple_eh_ctrl.region = region; } /* Return the region number for GIMPLE_EH_DISPATCH GS. */ static inline int gimple_eh_dispatch_region (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH); return gs->gimple_eh_ctrl.region; } /* Set REGION to be the region number for GIMPLE_EH_DISPATCH GS. */ static inline void gimple_eh_dispatch_set_region (gimple gs, int region) { GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH); gs->gimple_eh_ctrl.region = region; } /* Return the number of labels associated with the switch statement GS. */ static inline unsigned gimple_switch_num_labels (const_gimple gs) { unsigned num_ops; GIMPLE_CHECK (gs, GIMPLE_SWITCH); num_ops = gimple_num_ops (gs); gcc_gimple_checking_assert (num_ops > 1); return num_ops - 1; } /* Set NLABELS to be the number of labels for the switch statement GS. */ static inline void gimple_switch_set_num_labels (gimple g, unsigned nlabels) { GIMPLE_CHECK (g, GIMPLE_SWITCH); gimple_set_num_ops (g, nlabels + 1); } /* Return the index variable used by the switch statement GS. */ static inline tree gimple_switch_index (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_SWITCH); return gimple_op (gs, 0); } /* Return a pointer to the index variable for the switch statement GS. */ static inline tree * gimple_switch_index_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_SWITCH); return gimple_op_ptr (gs, 0); } /* Set INDEX to be the index variable for switch statement GS. */ static inline void gimple_switch_set_index (gimple gs, tree index) { GIMPLE_CHECK (gs, GIMPLE_SWITCH); gcc_gimple_checking_assert (SSA_VAR_P (index) || CONSTANT_CLASS_P (index)); gimple_set_op (gs, 0, index); } /* Return the label numbered INDEX. The default label is 0, followed by any labels in a switch statement. 
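   As an illustration (hypothetical; `sw' is a GIMPLE_SWITCH), the
   non-default cases are simply the labels numbered 1 .. n-1:

     unsigned i;
     for (i = 1; i < gimple_switch_num_labels (sw); i++)
       {
         tree case_label = gimple_switch_label (sw, i);
         ...
       }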
*/ static inline tree gimple_switch_label (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_SWITCH); gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1); return gimple_op (gs, index + 1); } /* Set the label number INDEX to LABEL. 0 is always the default label. */ static inline void gimple_switch_set_label (gimple gs, unsigned index, tree label) { GIMPLE_CHECK (gs, GIMPLE_SWITCH); gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1 && (label == NULL_TREE || TREE_CODE (label) == CASE_LABEL_EXPR)); gimple_set_op (gs, index + 1, label); } /* Return the default label for a switch statement. */ static inline tree gimple_switch_default_label (const_gimple gs) { tree label = gimple_switch_label (gs, 0); gcc_checking_assert (!CASE_LOW (label) && !CASE_HIGH (label)); return label; } /* Set the default label for a switch statement. */ static inline void gimple_switch_set_default_label (gimple gs, tree label) { gcc_checking_assert (!CASE_LOW (label) && !CASE_HIGH (label)); gimple_switch_set_label (gs, 0, label); } /* Return true if GS is a GIMPLE_DEBUG statement. */ static inline bool is_gimple_debug (const_gimple gs) { return gimple_code (gs) == GIMPLE_DEBUG; } /* Return true if S is a GIMPLE_DEBUG BIND statement. */ static inline bool gimple_debug_bind_p (const_gimple s) { if (is_gimple_debug (s)) return s->gsbase.subcode == GIMPLE_DEBUG_BIND; return false; } /* Return the variable bound in a GIMPLE_DEBUG bind statement. */ static inline tree gimple_debug_bind_get_var (gimple dbg) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_bind_p (dbg)); return gimple_op (dbg, 0); } /* Return the value bound to the variable in a GIMPLE_DEBUG bind statement. */ static inline tree gimple_debug_bind_get_value (gimple dbg) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_bind_p (dbg)); return gimple_op (dbg, 1); } /* Return a pointer to the value bound to the variable in a GIMPLE_DEBUG bind statement. */ static inline tree * gimple_debug_bind_get_value_ptr (gimple dbg) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_bind_p (dbg)); return gimple_op_ptr (dbg, 1); } /* Set the variable bound in a GIMPLE_DEBUG bind statement. */ static inline void gimple_debug_bind_set_var (gimple dbg, tree var) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_bind_p (dbg)); gimple_set_op (dbg, 0, var); } /* Set the value bound to the variable in a GIMPLE_DEBUG bind statement. */ static inline void gimple_debug_bind_set_value (gimple dbg, tree value) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_bind_p (dbg)); gimple_set_op (dbg, 1, value); } /* The second operand of a GIMPLE_DEBUG_BIND, when the value was optimized away. */ #define GIMPLE_DEBUG_BIND_NOVALUE NULL_TREE /* error_mark_node */ /* Remove the value bound to the variable in a GIMPLE_DEBUG bind statement. */ static inline void gimple_debug_bind_reset_value (gimple dbg) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_bind_p (dbg)); gimple_set_op (dbg, 1, GIMPLE_DEBUG_BIND_NOVALUE); } /* Return true if the GIMPLE_DEBUG bind statement is bound to a value. */ static inline bool gimple_debug_bind_has_value_p (gimple dbg) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_bind_p (dbg)); return gimple_op (dbg, 1) != GIMPLE_DEBUG_BIND_NOVALUE; } #undef GIMPLE_DEBUG_BIND_NOVALUE /* Return true if S is a GIMPLE_DEBUG SOURCE BIND statement. 
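   A hedged sketch of the debug-bind helpers above (hypothetical; `stmt' is
   any statement): when the value a debug bind refers to is about to be
   removed, callers drop the value rather than the whole statement:

     if (gimple_debug_bind_p (stmt) && gimple_debug_bind_has_value_p (stmt))
       gimple_debug_bind_reset_value (stmt);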
*/ static inline bool gimple_debug_source_bind_p (const_gimple s) { if (is_gimple_debug (s)) return s->gsbase.subcode == GIMPLE_DEBUG_SOURCE_BIND; return false; } /* Return the variable bound in a GIMPLE_DEBUG source bind statement. */ static inline tree gimple_debug_source_bind_get_var (gimple dbg) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg)); return gimple_op (dbg, 0); } /* Return the value bound to the variable in a GIMPLE_DEBUG source bind statement. */ static inline tree gimple_debug_source_bind_get_value (gimple dbg) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg)); return gimple_op (dbg, 1); } /* Return a pointer to the value bound to the variable in a GIMPLE_DEBUG source bind statement. */ static inline tree * gimple_debug_source_bind_get_value_ptr (gimple dbg) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg)); return gimple_op_ptr (dbg, 1); } /* Set the variable bound in a GIMPLE_DEBUG source bind statement. */ static inline void gimple_debug_source_bind_set_var (gimple dbg, tree var) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg)); gimple_set_op (dbg, 0, var); } /* Set the value bound to the variable in a GIMPLE_DEBUG source bind statement. */ static inline void gimple_debug_source_bind_set_value (gimple dbg, tree value) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg)); gimple_set_op (dbg, 1, value); } /* Return a pointer to the body for the OMP statement GS. */ static inline gimple_seq * gimple_omp_body_ptr (gimple gs) { return &gs->omp.body; } /* Return the body for the OMP statement GS. */ static inline gimple_seq gimple_omp_body (gimple gs) { return *gimple_omp_body_ptr (gs); } /* Set BODY to be the body for the OMP statement GS. */ static inline void gimple_omp_set_body (gimple gs, gimple_seq body) { gs->omp.body = body; } /* Return the name associated with OMP_CRITICAL statement GS. */ static inline tree gimple_omp_critical_name (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL); return gs->gimple_omp_critical.name; } /* Return a pointer to the name associated with OMP critical statement GS. */ static inline tree * gimple_omp_critical_name_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL); return &gs->gimple_omp_critical.name; } /* Set NAME to be the name associated with OMP critical statement GS. */ static inline void gimple_omp_critical_set_name (gimple gs, tree name) { GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL); gs->gimple_omp_critical.name = name; } /* Return the clauses associated with OMP_FOR GS. */ static inline tree gimple_omp_for_clauses (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); return gs->gimple_omp_for.clauses; } /* Return a pointer to the OMP_FOR GS. */ static inline tree * gimple_omp_for_clauses_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); return &gs->gimple_omp_for.clauses; } /* Set CLAUSES to be the list of clauses associated with OMP_FOR GS. */ static inline void gimple_omp_for_set_clauses (gimple gs, tree clauses) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gs->gimple_omp_for.clauses = clauses; } /* Get the collapse count of OMP_FOR GS. */ static inline size_t gimple_omp_for_collapse (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); return gs->gimple_omp_for.collapse; } /* Return the index variable for OMP_FOR GS. 
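   As a sketch (hypothetical; `omp_for' is a GIMPLE_OMP_FOR), the
   per-dimension accessors below are indexed by the collapse count:

     size_t i;
     for (i = 0; i < gimple_omp_for_collapse (omp_for); i++)
       {
         tree iv = gimple_omp_for_index (omp_for, i);
         ...
       }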
*/ static inline tree gimple_omp_for_index (const_gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return gs->gimple_omp_for.iter[i].index; } /* Return a pointer to the index variable for OMP_FOR GS. */ static inline tree * gimple_omp_for_index_ptr (gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return &gs->gimple_omp_for.iter[i].index; } /* Set INDEX to be the index variable for OMP_FOR GS. */ static inline void gimple_omp_for_set_index (gimple gs, size_t i, tree index) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); gs->gimple_omp_for.iter[i].index = index; } /* Return the initial value for OMP_FOR GS. */ static inline tree gimple_omp_for_initial (const_gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return gs->gimple_omp_for.iter[i].initial; } /* Return a pointer to the initial value for OMP_FOR GS. */ static inline tree * gimple_omp_for_initial_ptr (gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return &gs->gimple_omp_for.iter[i].initial; } /* Set INITIAL to be the initial value for OMP_FOR GS. */ static inline void gimple_omp_for_set_initial (gimple gs, size_t i, tree initial) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); gs->gimple_omp_for.iter[i].initial = initial; } /* Return the final value for OMP_FOR GS. */ static inline tree gimple_omp_for_final (const_gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return gs->gimple_omp_for.iter[i].final; } /* Return a pointer to the final value for OMP_FOR GS. */ static inline tree * gimple_omp_for_final_ptr (gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return &gs->gimple_omp_for.iter[i].final; } /* Set FINAL to be the final value for OMP_FOR GS. */ static inline void gimple_omp_for_set_final (gimple gs, size_t i, tree final) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); gs->gimple_omp_for.iter[i].final = final; } /* Return the increment value for OMP_FOR GS. */ static inline tree gimple_omp_for_incr (const_gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return gs->gimple_omp_for.iter[i].incr; } /* Return a pointer to the increment value for OMP_FOR GS. */ static inline tree * gimple_omp_for_incr_ptr (gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return &gs->gimple_omp_for.iter[i].incr; } /* Set INCR to be the increment value for OMP_FOR GS. */ static inline void gimple_omp_for_set_incr (gimple gs, size_t i, tree incr) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); gs->gimple_omp_for.iter[i].incr = incr; } /* Return a pointer to the sequence of statements to execute before the OMP_FOR statement GS starts. */ static inline gimple_seq * gimple_omp_for_pre_body_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); return &gs->gimple_omp_for.pre_body; } /* Return the sequence of statements to execute before the OMP_FOR statement GS starts. 
*/ static inline gimple_seq gimple_omp_for_pre_body (gimple gs) { return *gimple_omp_for_pre_body_ptr (gs); } /* Set PRE_BODY to be the sequence of statements to execute before the OMP_FOR statement GS starts. */ static inline void gimple_omp_for_set_pre_body (gimple gs, gimple_seq pre_body) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gs->gimple_omp_for.pre_body = pre_body; } /* Return the clauses associated with OMP_PARALLEL GS. */ static inline tree gimple_omp_parallel_clauses (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); return gs->gimple_omp_parallel.clauses; } /* Return a pointer to the clauses associated with OMP_PARALLEL GS. */ static inline tree * gimple_omp_parallel_clauses_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); return &gs->gimple_omp_parallel.clauses; } /* Set CLAUSES to be the list of clauses associated with OMP_PARALLEL GS. */ static inline void gimple_omp_parallel_set_clauses (gimple gs, tree clauses) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); gs->gimple_omp_parallel.clauses = clauses; } /* Return the child function used to hold the body of OMP_PARALLEL GS. */ static inline tree gimple_omp_parallel_child_fn (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); return gs->gimple_omp_parallel.child_fn; } /* Return a pointer to the child function used to hold the body of OMP_PARALLEL GS. */ static inline tree * gimple_omp_parallel_child_fn_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); return &gs->gimple_omp_parallel.child_fn; } /* Set CHILD_FN to be the child function for OMP_PARALLEL GS. */ static inline void gimple_omp_parallel_set_child_fn (gimple gs, tree child_fn) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); gs->gimple_omp_parallel.child_fn = child_fn; } /* Return the artificial argument used to send variables and values from the parent to the children threads in OMP_PARALLEL GS. */ static inline tree gimple_omp_parallel_data_arg (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); return gs->gimple_omp_parallel.data_arg; } /* Return a pointer to the data argument for OMP_PARALLEL GS. */ static inline tree * gimple_omp_parallel_data_arg_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); return &gs->gimple_omp_parallel.data_arg; } /* Set DATA_ARG to be the data argument for OMP_PARALLEL GS. */ static inline void gimple_omp_parallel_set_data_arg (gimple gs, tree data_arg) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); gs->gimple_omp_parallel.data_arg = data_arg; } /* Return the clauses associated with OMP_TASK GS. */ static inline tree gimple_omp_task_clauses (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_parallel.clauses; } /* Return a pointer to the clauses associated with OMP_TASK GS. */ static inline tree * gimple_omp_task_clauses_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_parallel.clauses; } /* Set CLAUSES to be the list of clauses associated with OMP_TASK GS. */ static inline void gimple_omp_task_set_clauses (gimple gs, tree clauses) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_parallel.clauses = clauses; } /* Return the child function used to hold the body of OMP_TASK GS. */ static inline tree gimple_omp_task_child_fn (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_parallel.child_fn; } /* Return a pointer to the child function used to hold the body of OMP_TASK GS. 
*/ static inline tree * gimple_omp_task_child_fn_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_parallel.child_fn; } /* Set CHILD_FN to be the child function for OMP_TASK GS. */ static inline void gimple_omp_task_set_child_fn (gimple gs, tree child_fn) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_parallel.child_fn = child_fn; } /* Return the artificial argument used to send variables and values from the parent to the children threads in OMP_TASK GS. */ static inline tree gimple_omp_task_data_arg (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_parallel.data_arg; } /* Return a pointer to the data argument for OMP_TASK GS. */ static inline tree * gimple_omp_task_data_arg_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_parallel.data_arg; } /* Set DATA_ARG to be the data argument for OMP_TASK GS. */ static inline void gimple_omp_task_set_data_arg (gimple gs, tree data_arg) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_parallel.data_arg = data_arg; } /* Return the clauses associated with OMP_TASK GS. */ static inline tree gimple_omp_taskreg_clauses (const_gimple gs) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_parallel.clauses; } /* Return a pointer to the clauses associated with OMP_TASK GS. */ static inline tree * gimple_omp_taskreg_clauses_ptr (gimple gs) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_parallel.clauses; } /* Set CLAUSES to be the list of clauses associated with OMP_TASK GS. */ static inline void gimple_omp_taskreg_set_clauses (gimple gs, tree clauses) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_parallel.clauses = clauses; } /* Return the child function used to hold the body of OMP_TASK GS. */ static inline tree gimple_omp_taskreg_child_fn (const_gimple gs) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_parallel.child_fn; } /* Return a pointer to the child function used to hold the body of OMP_TASK GS. */ static inline tree * gimple_omp_taskreg_child_fn_ptr (gimple gs) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_parallel.child_fn; } /* Set CHILD_FN to be the child function for OMP_TASK GS. */ static inline void gimple_omp_taskreg_set_child_fn (gimple gs, tree child_fn) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_parallel.child_fn = child_fn; } /* Return the artificial argument used to send variables and values from the parent to the children threads in OMP_TASK GS. */ static inline tree gimple_omp_taskreg_data_arg (const_gimple gs) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_parallel.data_arg; } /* Return a pointer to the data argument for OMP_TASK GS. */ static inline tree * gimple_omp_taskreg_data_arg_ptr (gimple gs) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_parallel.data_arg; } /* Set DATA_ARG to be the data argument for OMP_TASK GS. */ static inline void gimple_omp_taskreg_set_data_arg (gimple gs, tree data_arg) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_parallel.data_arg = data_arg; } /* Return the copy function used to hold the body of OMP_TASK GS. 
*/ static inline tree gimple_omp_task_copy_fn (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_task.copy_fn; } /* Return a pointer to the copy function used to hold the body of OMP_TASK GS. */ static inline tree * gimple_omp_task_copy_fn_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_task.copy_fn; } /* Set CHILD_FN to be the copy function for OMP_TASK GS. */ static inline void gimple_omp_task_set_copy_fn (gimple gs, tree copy_fn) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_task.copy_fn = copy_fn; } /* Return size of the data block in bytes in OMP_TASK GS. */ static inline tree gimple_omp_task_arg_size (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_task.arg_size; } /* Return a pointer to the data block size for OMP_TASK GS. */ static inline tree * gimple_omp_task_arg_size_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_task.arg_size; } /* Set ARG_SIZE to be the data block size for OMP_TASK GS. */ static inline void gimple_omp_task_set_arg_size (gimple gs, tree arg_size) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_task.arg_size = arg_size; } /* Return align of the data block in bytes in OMP_TASK GS. */ static inline tree gimple_omp_task_arg_align (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_task.arg_align; } /* Return a pointer to the data block align for OMP_TASK GS. */ static inline tree * gimple_omp_task_arg_align_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_task.arg_align; } /* Set ARG_SIZE to be the data block align for OMP_TASK GS. */ static inline void gimple_omp_task_set_arg_align (gimple gs, tree arg_align) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_task.arg_align = arg_align; } /* Return the clauses associated with OMP_SINGLE GS. */ static inline tree gimple_omp_single_clauses (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE); return gs->gimple_omp_single.clauses; } /* Return a pointer to the clauses associated with OMP_SINGLE GS. */ static inline tree * gimple_omp_single_clauses_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE); return &gs->gimple_omp_single.clauses; } /* Set CLAUSES to be the clauses associated with OMP_SINGLE GS. */ static inline void gimple_omp_single_set_clauses (gimple gs, tree clauses) { GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE); gs->gimple_omp_single.clauses = clauses; } /* Return the clauses associated with OMP_SECTIONS GS. */ static inline tree gimple_omp_sections_clauses (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS); return gs->gimple_omp_sections.clauses; } /* Return a pointer to the clauses associated with OMP_SECTIONS GS. */ static inline tree * gimple_omp_sections_clauses_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS); return &gs->gimple_omp_sections.clauses; } /* Set CLAUSES to be the set of clauses associated with OMP_SECTIONS GS. */ static inline void gimple_omp_sections_set_clauses (gimple gs, tree clauses) { GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS); gs->gimple_omp_sections.clauses = clauses; } /* Return the control variable associated with the GIMPLE_OMP_SECTIONS in GS. */ static inline tree gimple_omp_sections_control (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS); return gs->gimple_omp_sections.control; } /* Return a pointer to the clauses associated with the GIMPLE_OMP_SECTIONS GS. 
*/ static inline tree * gimple_omp_sections_control_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS); return &gs->gimple_omp_sections.control; } /* Set CONTROL to be the set of clauses associated with the GIMPLE_OMP_SECTIONS in GS. */ static inline void gimple_omp_sections_set_control (gimple gs, tree control) { GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS); gs->gimple_omp_sections.control = control; } /* Set COND to be the condition code for OMP_FOR GS. */ static inline void gimple_omp_for_set_cond (gimple gs, size_t i, enum tree_code cond) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (TREE_CODE_CLASS (cond) == tcc_comparison && i < gs->gimple_omp_for.collapse); gs->gimple_omp_for.iter[i].cond = cond; } /* Return the condition code associated with OMP_FOR GS. */ static inline enum tree_code gimple_omp_for_cond (const_gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return gs->gimple_omp_for.iter[i].cond; } /* Set the value being stored in an atomic store. */ static inline void gimple_omp_atomic_store_set_val (gimple g, tree val) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE); g->gimple_omp_atomic_store.val = val; } /* Return the value being stored in an atomic store. */ static inline tree gimple_omp_atomic_store_val (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE); return g->gimple_omp_atomic_store.val; } /* Return a pointer to the value being stored in an atomic store. */ static inline tree * gimple_omp_atomic_store_val_ptr (gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE); return &g->gimple_omp_atomic_store.val; } /* Set the LHS of an atomic load. */ static inline void gimple_omp_atomic_load_set_lhs (gimple g, tree lhs) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD); g->gimple_omp_atomic_load.lhs = lhs; } /* Get the LHS of an atomic load. */ static inline tree gimple_omp_atomic_load_lhs (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD); return g->gimple_omp_atomic_load.lhs; } /* Return a pointer to the LHS of an atomic load. */ static inline tree * gimple_omp_atomic_load_lhs_ptr (gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD); return &g->gimple_omp_atomic_load.lhs; } /* Set the RHS of an atomic load. */ static inline void gimple_omp_atomic_load_set_rhs (gimple g, tree rhs) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD); g->gimple_omp_atomic_load.rhs = rhs; } /* Get the RHS of an atomic load. */ static inline tree gimple_omp_atomic_load_rhs (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD); return g->gimple_omp_atomic_load.rhs; } /* Return a pointer to the RHS of an atomic load. */ static inline tree * gimple_omp_atomic_load_rhs_ptr (gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD); return &g->gimple_omp_atomic_load.rhs; } /* Get the definition of the control variable in a GIMPLE_OMP_CONTINUE. */ static inline tree gimple_omp_continue_control_def (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE); return g->gimple_omp_continue.control_def; } /* The same as above, but return the address. */ static inline tree * gimple_omp_continue_control_def_ptr (gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE); return &g->gimple_omp_continue.control_def; } /* Set the definition of the control variable in a GIMPLE_OMP_CONTINUE. */ static inline void gimple_omp_continue_set_control_def (gimple g, tree def) { GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE); g->gimple_omp_continue.control_def = def; } /* Get the use of the control variable in a GIMPLE_OMP_CONTINUE. 
*/ static inline tree gimple_omp_continue_control_use (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE); return g->gimple_omp_continue.control_use; } /* The same as above, but return the address. */ static inline tree * gimple_omp_continue_control_use_ptr (gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE); return &g->gimple_omp_continue.control_use; } /* Set the use of the control variable in a GIMPLE_OMP_CONTINUE. */ static inline void gimple_omp_continue_set_control_use (gimple g, tree use) { GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE); g->gimple_omp_continue.control_use = use; } /* Return a pointer to the body for the GIMPLE_TRANSACTION statement GS. */ static inline gimple_seq * gimple_transaction_body_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_TRANSACTION); return &gs->gimple_transaction.body; } /* Return the body for the GIMPLE_TRANSACTION statement GS. */ static inline gimple_seq gimple_transaction_body (gimple gs) { return *gimple_transaction_body_ptr (gs); } /* Return the label associated with a GIMPLE_TRANSACTION. */ static inline tree gimple_transaction_label (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_TRANSACTION); return gs->gimple_transaction.label; } static inline tree * gimple_transaction_label_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_TRANSACTION); return &gs->gimple_transaction.label; } /* Return the subcode associated with a GIMPLE_TRANSACTION. */ static inline unsigned int gimple_transaction_subcode (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_TRANSACTION); return gs->gsbase.subcode; } /* Set BODY to be the body for the GIMPLE_TRANSACTION statement GS. */ static inline void gimple_transaction_set_body (gimple gs, gimple_seq body) { GIMPLE_CHECK (gs, GIMPLE_TRANSACTION); gs->gimple_transaction.body = body; } /* Set the label associated with a GIMPLE_TRANSACTION. */ static inline void gimple_transaction_set_label (gimple gs, tree label) { GIMPLE_CHECK (gs, GIMPLE_TRANSACTION); gs->gimple_transaction.label = label; } /* Set the subcode associated with a GIMPLE_TRANSACTION. */ static inline void gimple_transaction_set_subcode (gimple gs, unsigned int subcode) { GIMPLE_CHECK (gs, GIMPLE_TRANSACTION); gs->gsbase.subcode = subcode; } /* Return a pointer to the return value for GIMPLE_RETURN GS. */ static inline tree * gimple_return_retval_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_RETURN); return gimple_op_ptr (gs, 0); } /* Return the return value for GIMPLE_RETURN GS. */ static inline tree gimple_return_retval (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_RETURN); return gimple_op (gs, 0); } /* Set RETVAL to be the return value for GIMPLE_RETURN GS. */ static inline void gimple_return_set_retval (gimple gs, tree retval) { GIMPLE_CHECK (gs, GIMPLE_RETURN); gimple_set_op (gs, 0, retval); } /* Returns true when the gimple statement STMT is any of the OpenMP types. */ #define CASE_GIMPLE_OMP \ case GIMPLE_OMP_PARALLEL: \ case GIMPLE_OMP_TASK: \ case GIMPLE_OMP_FOR: \ case GIMPLE_OMP_SECTIONS: \ case GIMPLE_OMP_SECTIONS_SWITCH: \ case GIMPLE_OMP_SINGLE: \ case GIMPLE_OMP_SECTION: \ case GIMPLE_OMP_MASTER: \ case GIMPLE_OMP_ORDERED: \ case GIMPLE_OMP_CRITICAL: \ case GIMPLE_OMP_RETURN: \ case GIMPLE_OMP_ATOMIC_LOAD: \ case GIMPLE_OMP_ATOMIC_STORE: \ case GIMPLE_OMP_CONTINUE static inline bool is_gimple_omp (const_gimple stmt) { switch (gimple_code (stmt)) { CASE_GIMPLE_OMP: return true; default: return false; } } /* Returns TRUE if statement G is a GIMPLE_NOP. 
*/ static inline bool gimple_nop_p (const_gimple g) { return gimple_code (g) == GIMPLE_NOP; } /* Return true if GS is a GIMPLE_RESX. */ static inline bool is_gimple_resx (const_gimple gs) { return gimple_code (gs) == GIMPLE_RESX; } /* Return the predictor of GIMPLE_PREDICT statement GS. */ static inline enum br_predictor gimple_predict_predictor (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_PREDICT); return (enum br_predictor) (gs->gsbase.subcode & ~GF_PREDICT_TAKEN); } /* Set the predictor of GIMPLE_PREDICT statement GS to PREDICT. */ static inline void gimple_predict_set_predictor (gimple gs, enum br_predictor predictor) { GIMPLE_CHECK (gs, GIMPLE_PREDICT); gs->gsbase.subcode = (gs->gsbase.subcode & GF_PREDICT_TAKEN) | (unsigned) predictor; } /* Return the outcome of GIMPLE_PREDICT statement GS. */ static inline enum prediction gimple_predict_outcome (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_PREDICT); return (gs->gsbase.subcode & GF_PREDICT_TAKEN) ? TAKEN : NOT_TAKEN; } /* Set the outcome of GIMPLE_PREDICT statement GS to OUTCOME. */ static inline void gimple_predict_set_outcome (gimple gs, enum prediction outcome) { GIMPLE_CHECK (gs, GIMPLE_PREDICT); if (outcome == TAKEN) gs->gsbase.subcode |= GF_PREDICT_TAKEN; else gs->gsbase.subcode &= ~GF_PREDICT_TAKEN; } /* Return the type of the main expression computed by STMT. Return void_type_node if the statement computes nothing. */ static inline tree gimple_expr_type (const_gimple stmt) { enum gimple_code code = gimple_code (stmt); if (code == GIMPLE_ASSIGN || code == GIMPLE_CALL) { tree type; /* In general we want to pass out a type that can be substituted for both the RHS and the LHS types if there is a possibly useless conversion involved. That means returning the original RHS type as far as we can reconstruct it. */ if (code == GIMPLE_CALL) type = gimple_call_return_type (stmt); else switch (gimple_assign_rhs_code (stmt)) { case POINTER_PLUS_EXPR: type = TREE_TYPE (gimple_assign_rhs1 (stmt)); break; default: /* As fallback use the type of the LHS. */ type = TREE_TYPE (gimple_get_lhs (stmt)); break; } return type; } else if (code == GIMPLE_COND) return boolean_type_node; else return void_type_node; } /* Return true if TYPE is a suitable type for a scalar register variable. */ static inline bool is_gimple_reg_type (tree type) { return !AGGREGATE_TYPE_P (type); } /* Return a new iterator pointing to GIMPLE_SEQ's first statement. */ static inline gimple_stmt_iterator gsi_start_1 (gimple_seq *seq) { gimple_stmt_iterator i; i.ptr = gimple_seq_first (*seq); i.seq = seq; i.bb = i.ptr ? gimple_bb (i.ptr) : NULL; return i; } #define gsi_start(x) gsi_start_1(&(x)) static inline gimple_stmt_iterator gsi_none (void) { gimple_stmt_iterator i; i.ptr = NULL; i.seq = NULL; i.bb = NULL; return i; } /* Return a new iterator pointing to the first statement in basic block BB. */ static inline gimple_stmt_iterator gsi_start_bb (basic_block bb) { gimple_stmt_iterator i; gimple_seq *seq; seq = bb_seq_addr (bb); i.ptr = gimple_seq_first (*seq); i.seq = seq; i.bb = bb; return i; } /* Return a new iterator initially pointing to GIMPLE_SEQ's last statement. */ static inline gimple_stmt_iterator gsi_last_1 (gimple_seq *seq) { gimple_stmt_iterator i; i.ptr = gimple_seq_last (*seq); i.seq = seq; i.bb = i.ptr ? gimple_bb (i.ptr) : NULL; return i; } #define gsi_last(x) gsi_last_1(&(x)) /* Return a new iterator pointing to the last statement in basic block BB. 
*/ static inline gimple_stmt_iterator gsi_last_bb (basic_block bb) { gimple_stmt_iterator i; gimple_seq *seq; seq = bb_seq_addr (bb); i.ptr = gimple_seq_last (*seq); i.seq = seq; i.bb = bb; return i; } /* Return true if I is at the end of its sequence. */ static inline bool gsi_end_p (gimple_stmt_iterator i) { return i.ptr == NULL; } /* Return true if I is one statement before the end of its sequence. */ static inline bool gsi_one_before_end_p (gimple_stmt_iterator i) { return i.ptr != NULL && i.ptr->gsbase.next == NULL; } /* Advance the iterator to the next gimple statement. */ static inline void gsi_next (gimple_stmt_iterator *i) { i->ptr = i->ptr->gsbase.next; } /* Advance the iterator to the previous gimple statement. */ static inline void gsi_prev (gimple_stmt_iterator *i) { gimple prev = i->ptr->gsbase.prev; if (prev->gsbase.next) i->ptr = prev; else i->ptr = NULL; } /* Return the current stmt. */ static inline gimple gsi_stmt (gimple_stmt_iterator i) { return i.ptr; } /* Return a block statement iterator that points to the first non-label statement in block BB. */ static inline gimple_stmt_iterator gsi_after_labels (basic_block bb) { gimple_stmt_iterator gsi = gsi_start_bb (bb); while (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL) gsi_next (&gsi); return gsi; } /* Advance the iterator to the next non-debug gimple statement. */ static inline void gsi_next_nondebug (gimple_stmt_iterator *i) { do { gsi_next (i); } while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i))); } /* Advance the iterator to the next non-debug gimple statement. */ static inline void gsi_prev_nondebug (gimple_stmt_iterator *i) { do { gsi_prev (i); } while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i))); } /* Return a new iterator pointing to the first non-debug statement in basic block BB. */ static inline gimple_stmt_iterator gsi_start_nondebug_bb (basic_block bb) { gimple_stmt_iterator i = gsi_start_bb (bb); if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i))) gsi_next_nondebug (&i); return i; } /* Return a new iterator pointing to the last non-debug statement in basic block BB. */ static inline gimple_stmt_iterator gsi_last_nondebug_bb (basic_block bb) { gimple_stmt_iterator i = gsi_last_bb (bb); if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i))) gsi_prev_nondebug (&i); return i; } /* Return the basic block associated with this iterator. */ static inline basic_block gsi_bb (gimple_stmt_iterator i) { return i.bb; } /* Return the sequence associated with this iterator. */ static inline gimple_seq gsi_seq (gimple_stmt_iterator i) { return *i.seq; } enum gsi_iterator_update { GSI_NEW_STMT, /* Only valid when single statement is added, move iterator to it. */ GSI_SAME_STMT, /* Leave the iterator at the same statement. */ GSI_CONTINUE_LINKING /* Move iterator to whatever position is suitable for linking other statements in the same direction. 
*/ }; /* In gimple-iterator.c */ gimple_stmt_iterator gsi_start_phis (basic_block); gimple_seq gsi_split_seq_after (gimple_stmt_iterator); void gsi_split_seq_before (gimple_stmt_iterator *, gimple_seq *); void gsi_set_stmt (gimple_stmt_iterator *, gimple); void gsi_replace (gimple_stmt_iterator *, gimple, bool); void gsi_replace_with_seq (gimple_stmt_iterator *, gimple_seq, bool); void gsi_insert_before (gimple_stmt_iterator *, gimple, enum gsi_iterator_update); void gsi_insert_before_without_update (gimple_stmt_iterator *, gimple, enum gsi_iterator_update); void gsi_insert_seq_before (gimple_stmt_iterator *, gimple_seq, enum gsi_iterator_update); void gsi_insert_seq_before_without_update (gimple_stmt_iterator *, gimple_seq, enum gsi_iterator_update); void gsi_insert_after (gimple_stmt_iterator *, gimple, enum gsi_iterator_update); void gsi_insert_after_without_update (gimple_stmt_iterator *, gimple, enum gsi_iterator_update); void gsi_insert_seq_after (gimple_stmt_iterator *, gimple_seq, enum gsi_iterator_update); void gsi_insert_seq_after_without_update (gimple_stmt_iterator *, gimple_seq, enum gsi_iterator_update); bool gsi_remove (gimple_stmt_iterator *, bool); gimple_stmt_iterator gsi_for_stmt (gimple); void gsi_move_after (gimple_stmt_iterator *, gimple_stmt_iterator *); void gsi_move_before (gimple_stmt_iterator *, gimple_stmt_iterator *); void gsi_move_to_bb_end (gimple_stmt_iterator *, basic_block); void gsi_insert_on_edge (edge, gimple); void gsi_insert_seq_on_edge (edge, gimple_seq); basic_block gsi_insert_on_edge_immediate (edge, gimple); basic_block gsi_insert_seq_on_edge_immediate (edge, gimple_seq); void gsi_commit_one_edge_insert (edge, basic_block *); void gsi_commit_edge_inserts (void); gimple gimple_call_copy_skip_args (gimple, bitmap); /* Convenience routines to walk all statements of a gimple function. Note that this is useful exclusively before the code is converted into SSA form. Once the program is in SSA form, the standard operand interface should be used to analyze/modify statements. */ struct walk_stmt_info { /* Points to the current statement being walked. */ gimple_stmt_iterator gsi; /* Additional data that the callback functions may want to carry through the recursion. */ void *info; /* Pointer map used to mark visited tree nodes when calling walk_tree on each operand. If set to NULL, duplicate tree nodes will be visited more than once. */ struct pointer_set_t *pset; /* Operand returned by the callbacks. This is set when calling walk_gimple_seq. If the walk_stmt_fn or walk_tree_fn callback returns non-NULL, this field will contain the tree returned by the last callback. */ tree callback_result; /* Indicates whether the operand being examined may be replaced with something that matches is_gimple_val (if true) or something slightly more complicated (if false). "Something" technically means the common subset of is_gimple_lvalue and is_gimple_rhs, but we never try to form anything more complicated than that, so we don't bother checking. Also note that CALLBACK should update this flag while walking the sub-expressions of a statement. For instance, when walking the statement 'foo (&var)', the flag VAL_ONLY will initially be set to true, however, when walking &var, the operand of that ADDR_EXPR does not need to be a GIMPLE value. */ BOOL_BITFIELD val_only : 1; /* True if we are currently walking the LHS of an assignment. */ BOOL_BITFIELD is_lhs : 1; /* Optional. Set to true by the callback functions if they made any changes. 
*/ BOOL_BITFIELD changed : 1; /* True if we're interested in location information. */ BOOL_BITFIELD want_locations : 1; /* True if we've removed the statement that was processed. */ BOOL_BITFIELD removed_stmt : 1; }; /* Callback for walk_gimple_stmt. Called for every statement found during traversal. The first argument points to the statement to walk. The second argument is a flag that the callback sets to 'true' if it the callback handled all the operands and sub-statements of the statement (the default value of this flag is 'false'). The third argument is an anonymous pointer to data to be used by the callback. */ typedef tree (*walk_stmt_fn) (gimple_stmt_iterator *, bool *, struct walk_stmt_info *); gimple walk_gimple_seq (gimple_seq, walk_stmt_fn, walk_tree_fn, struct walk_stmt_info *); gimple walk_gimple_seq_mod (gimple_seq *, walk_stmt_fn, walk_tree_fn, struct walk_stmt_info *); tree walk_gimple_stmt (gimple_stmt_iterator *, walk_stmt_fn, walk_tree_fn, struct walk_stmt_info *); tree walk_gimple_op (gimple, walk_tree_fn, struct walk_stmt_info *); /* Enum and arrays used for allocation stats. Keep in sync with gimple.c:gimple_alloc_kind_names. */ enum gimple_alloc_kind { gimple_alloc_kind_assign, /* Assignments. */ gimple_alloc_kind_phi, /* PHI nodes. */ gimple_alloc_kind_cond, /* Conditionals. */ gimple_alloc_kind_rest, /* Everything else. */ gimple_alloc_kind_all }; extern int gimple_alloc_counts[]; extern int gimple_alloc_sizes[]; /* Return the allocation kind for a given stmt CODE. */ static inline enum gimple_alloc_kind gimple_alloc_kind (enum gimple_code code) { switch (code) { case GIMPLE_ASSIGN: return gimple_alloc_kind_assign; case GIMPLE_PHI: return gimple_alloc_kind_phi; case GIMPLE_COND: return gimple_alloc_kind_cond; default: return gimple_alloc_kind_rest; } } extern void dump_gimple_statistics (void); /* In gimple-fold.c. */ void gimplify_and_update_call_from_tree (gimple_stmt_iterator *, tree); tree gimple_fold_builtin (gimple); bool fold_stmt (gimple_stmt_iterator *); bool fold_stmt_inplace (gimple_stmt_iterator *); tree get_symbol_constant_value (tree); tree canonicalize_constructor_val (tree, tree); extern tree maybe_fold_and_comparisons (enum tree_code, tree, tree, enum tree_code, tree, tree); extern tree maybe_fold_or_comparisons (enum tree_code, tree, tree, enum tree_code, tree, tree); bool gimple_val_nonnegative_real_p (tree); #endif /* GCC_GIMPLE_H */
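/* Illustrative usage sketch, not part of the original gimple.h: it combines the
   gsi_* iterator accessors defined above to walk the non-debug statements of a
   basic block and count the OpenMP statements among them.  The helper name
   count_omp_stmts_in_bb is hypothetical, and it assumes basic-block.h has been
   included beforehand so that basic_block is available, as is usual for users
   of this header.  */

static inline unsigned
count_omp_stmts_in_bb (basic_block bb)
{
  unsigned count = 0;
  gimple_stmt_iterator gsi;

  /* Start at the first non-debug statement and advance with the non-debug
     variant so that GIMPLE_DEBUG statements are skipped.  */
  for (gsi = gsi_start_nondebug_bb (bb);
       !gsi_end_p (gsi);
       gsi_next_nondebug (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_omp (stmt))
        count++;
    }

  return count;
}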
images.h
/* This file is part of the MMA Library - https://gitlab.inria.fr/dloiseau/multipers - which is released under MIT. * See file LICENSE for full license details. * Author(s): David Loiseaux * * Copyright (C) 2021 Inria * * Modification(s): * - 2022/03 Hannah Schreiber: Integration of the new Vineyard_persistence class, renaming and cleanup. */ /** * @file images.h * @author David Loiseaux, Hannah Schreiber * @brief Functions to generate multipersistence images */ #ifndef IMAGES_H_INCLUDED #define IMAGES_H_INCLUDED #include <vector> #include <algorithm> #include <cmath> #include "approximation.h" #include "debug.h" #include "utilities.h" using Vineyard::boundary_matrix; using Vineyard::filtration_type; using Vineyard::corner_type; using Vineyard::corner_list; using Vineyard::summand_list_type; using Vineyard::dimension_type; using Vineyard::inf; using Vineyard::negInf; template<typename T> class Box; class Summand; std::vector<std::vector<std::vector<double> > > get_2D_image_from_boundary_matrix( boundary_matrix &boundaryMatrix, const std::vector<filtration_type> &filtersList, const double precision, const std::pair<corner_type, corner_type> &box, const double delta, const std::vector<unsigned int> &resolution, const dimension_type dimension, const bool complete = true, const bool verbose = false); std::vector<std::vector<double> > compute_2D_image( std::vector<Summand>& module, const double delta, const std::vector<unsigned int>& resolution, const Box<double>& box, bool verbose = true); double get_pixel_value( std::vector<Summand>& module, const corner_type& x, const double delta, double moduleWeight = -1); /** * @brief Holds the square box on which to compute. */ template<typename T> class Box { public: Box(const std::vector<T>& bottomCorner, const std::vector<T>& upperCorner) : bottomCorner_(bottomCorner), upperCorner_(upperCorner) { assert(bottomCorner.size() == upperCorner.size() && Vineyard::is_less(bottomCorner, upperCorner) && "This box is trivial !"); } Box(const std::pair<std::vector<T>, std::vector<T> >& box) : bottomCorner_(box.first), upperCorner_(box.second) {} void inflate(double delta){ #pragma omp simd for (unsigned int i = 0; i < bottomCorner_.size(); i++){ bottomCorner_[i] -= delta; upperCorner_[i] += delta; } } const std::vector<T>& getBottomCorner() const{ return bottomCorner_; }; const std::vector<T>& getUpperCorner() const{ return upperCorner_; }; private: std::vector<T> bottomCorner_; std::vector<T> upperCorner_; }; class Summand { public: Summand() : distanceTo0_(-1), updateDistance_(true) {} Summand(corner_list &summand) : summand_(summand), distanceTo0_(-1), updateDistance_(true) {} double get_interleaving() { if (updateDistance_) _compute_interleaving(); return distanceTo0_; } double get_local_weight(const corner_type& x, const double delta){ if (delta <= 0) return 0; double maxDiag = 0; std::vector<double> mini(x.size()); std::vector<double> maxi(x.size()); // box on which to compute the local weight #pragma omp simd for(unsigned int i = 0; i < x.size(); i++){ mini[i] = x[i] - delta; maxi[i] = x[i] + delta; } // Pre-allocating std::vector<corner_type> birthList(summand_.first.size()); std::vector<corner_type> deathList(summand_.second.size()); unsigned int lastEntry = 0; for (const corner_type& birth : summand_.first){ if (Vineyard::is_less(birth, maxi)){ corner_type tmpBirth(birth.size()); // WARNING should crash here if birth and x aren't of the same size. 
#pragma omp simd for (unsigned int i = 0; i < birth.size(); i++) tmpBirth[i] = std::max(birth[i], mini[i]); birthList[lastEntry].swap(tmpBirth); lastEntry++; } } birthList.resize(lastEntry); lastEntry = 0; for (const corner_type& death : summand_.second){ if (Vineyard::is_greater(death, mini)){ corner_type tmpDeath(death.size()); // WARNING should crash here if birth and x aren't of the same size. #pragma omp simd for (unsigned int i = 0; i < death.size(); i++) tmpDeath[i] = std::min(death[i], maxi[i]); deathList[lastEntry].swap(tmpDeath); lastEntry++; } } deathList.resize(lastEntry); for (const corner_type& birth : birthList){ if (birth.size() == 0 ) continue; for (const corner_type& death : deathList){ if (death.size() > 0) maxDiag = std::max(maxDiag, Vineyard::get_min_diagonal(birth,death)); } } return maxDiag; // should be less than delta } void swapSummand(corner_list& summand){ summand_.swap(summand); updateDistance_ = true; } private: corner_list summand_; double distanceTo0_; bool updateDistance_; void _compute_interleaving(){ distanceTo0_ = 0; for (const std::vector<double> &birth : summand_.first){ for(const std::vector<double> &death : summand_.second){ distanceTo0_ = std::max(distanceTo0_, Vineyard::get_min_diagonal(birth, death)); } } updateDistance_ = false; } }; std::vector<std::vector<std::vector<double> > > get_2D_image_from_boundary_matrix( boundary_matrix &boundaryMatrix, const std::vector<filtration_type> &filtersList, const double precision, const std::pair<corner_type, corner_type> &box, const double delta, const std::vector<unsigned int> &resolution, const dimension_type dimension, const bool complete, const bool verbose) { Box<double> bbox(box); bbox.inflate(delta); std::vector<summand_list_type> approximation = Vineyard::compute_vineyard_barcode_approximation( boundaryMatrix, filtersList, precision, std::make_pair(bbox.getBottomCorner(), bbox.getUpperCorner()), true, false, complete, false, verbose); if (dimension < 0){ std::vector<std::vector<std::vector<double>>> image_vector(approximation.size()); for (unsigned int i = 0; i < approximation.size(); i++){ std::vector<Summand> module(approximation[i].size()); for (unsigned int j = 0; j < approximation[i].size(); j++) module[j].swapSummand(approximation[i][j]); {//for Timer Debug::Timer timer("Computing image of dimension " + std::to_string(i) + " ...", verbose); image_vector[i]=compute_2D_image(module, delta, resolution, Box<double>(box), verbose); }//Timer death } return image_vector; } std::vector<Summand> module(approximation[dimension].size()); for (unsigned int i = 0; i < approximation[dimension].size(); i++) module[i].swapSummand(approximation[dimension][i]); //TODO: verify its not killed too soon Debug::Timer timer("Computing image of dimension " + std::to_string(dimension) + " ...", verbose); return {compute_2D_image(module, delta, resolution, Box<double>(box),verbose)}; } std::vector<std::vector<double> > compute_2D_image( std::vector<Summand>& module, const double delta, const std::vector<unsigned int>& resolution, const Box<double>& box, bool verbose) { // Keep dim = 2 here. We ignore other values. 
assert(resolution.size() >= 2); std::vector<std::vector<double> > image(resolution[0], std::vector<double>(resolution[1])); double moduleWeight = 0; {//for Timer Debug::Timer timer("Computing module weight ...", verbose); #pragma omp parallel for reduction(+ : moduleWeight) for (Summand& indModule : module){ moduleWeight += indModule.get_interleaving(); } }//Timer death if (verbose) std::cout << "Module weight : " << moduleWeight << "\n"; if (moduleWeight <= 0){ if (Debug::debug) std::cout << "!! Negative weight !!" << std::endl; return {{0}}; } double stepX = (box.getUpperCorner()[0] - box.getBottomCorner()[0]) / resolution[0]; double stepY = (box.getUpperCorner()[1] - box.getBottomCorner()[1]) / resolution[1]; {//for Timer Debug::Timer timer("Computing pixel values ...", verbose); #pragma omp parallel for collapse(2) for (unsigned int i = 0; i < resolution[0]; i++){ for (unsigned int j = 0; j < resolution[1]; j++){ corner_type x = { box.getBottomCorner()[0] + stepX * i, box.getBottomCorner()[1] + stepY * j }; image[i][j] = get_pixel_value(module, x, delta, moduleWeight); } } }//Timer death return image; } double get_pixel_value( std::vector<Summand>& module, const corner_type& x, const double delta, double moduleWeight) { double value = 0; if (moduleWeight <= 0){ moduleWeight = 0; // Computes the module weight #pragma omp parallel for reduction(+ : moduleWeight) for (Summand &ind_module : module){ moduleWeight += ind_module.get_interleaving(); } } #pragma omp parallel for reduction(+ : value) for (Summand &indModule : module) { double summandWeight = indModule.get_interleaving() / moduleWeight; double summandXWeight = indModule.get_local_weight(x, delta) / delta; value += summandWeight * summandXWeight; } return value/2; } #endif // IMAGES_H_INCLUDED
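// Illustrative sketch, not part of the original images.h: the grid convention
// used by compute_2D_image() above. Pixel (i, j) of the returned image is the
// value of get_pixel_value() at the corner
//     x = bottomCorner + (stepX * i, stepY * j),  step = (upperCorner - bottomCorner) / resolution,
// and each summand contributes
//     (interleaving / moduleWeight) * (local_weight(x, delta) / delta) / 2
// to that value. The helper below only reproduces the coordinate mapping; its
// name pixel_to_corner is hypothetical.
inline corner_type pixel_to_corner(const Box<double>& box,
                                   const std::vector<unsigned int>& resolution,
                                   unsigned int i, unsigned int j)
{
    // Same per-axis step computation as in compute_2D_image().
    double stepX = (box.getUpperCorner()[0] - box.getBottomCorner()[0]) / resolution[0];
    double stepY = (box.getUpperCorner()[1] - box.getBottomCorner()[1]) / resolution[1];
    return { box.getBottomCorner()[0] + stepX * i,
             box.getBottomCorner()[1] + stepY * j };
}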
isd.c
/* Copyright (c) 2019 Valentin Vasseur Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE */ #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include "dumer.h" #ifndef BENCHMARK #define BENCHMARK 0 #endif struct timespec timer_start() { struct timespec start_time; clock_gettime(CLOCK_MONOTONIC, &start_time); return start_time; } long timer_end(struct timespec start_time) { struct timespec end_time; clock_gettime(CLOCK_MONOTONIC, &end_time); long diff = (end_time.tv_sec - start_time.tv_sec) * (long)1e9 + (end_time.tv_nsec - start_time.tv_nsec); return diff; } static void skip_comment(FILE *file, int *c) { *c = getc(file); if (*c == '#') while ((*c = getc(file)) != EOF && *c != '\n') ; if (*c == '\n') *c = getc(file); } static int read_int(FILE *file, int *c, size_t *n) { if (*c == EOF) return 0; while (*c != EOF && *c != '\n') { if (*c >= '0' && *c <= '9') { *n *= 10; *n += *c - '0'; } else return 0; *c = getc(file); } return 1; } static int read_bin_vector(FILE *file, int *c, uint8_t *m, size_t *len) { if (*c == EOF) return 0; *len = 0; while (*c != EOF && *c != '\n') { if (*c == '1' || *c == '0') { m[(*len)++] = *c - '0'; } else return 0; *c = getc(file); } return 1; } static int read_bin_matrix(FILE *file, int rows, int *c, uint8_t *m, size_t *len) { if (*c == EOF) return 0; *len = 0; while (*c != EOF) { if (*c == '1' || *c == '0') { m[(*len)++] = *c - '0'; } else if (*c == '\n') --rows; else { return 0; } if (!rows) break; *c = getc(file); } return 1; } static int parse_input_sd(char *filename, size_t *n, size_t *k, size_t *w, uint8_t **mat_h, size_t *len_h, uint8_t **mat_s, size_t *len_s) { int ret = 0; *n = 0; *k = 0; *w = 0; size_t seed = 0; int c; FILE *file; file = fopen(filename, "r"); if (file) { skip_comment(file, &c); /* Read n. */ if (!read_int(file, &c, n)) goto end; *k = *n / 2; skip_comment(file, &c); /* Read seed. */ if (!read_int(file, &c, &seed)) goto end; skip_comment(file, &c); /* Read w. */ if (!read_int(file, &c, w)) goto end; *mat_h = malloc(*k * *k * sizeof(uint8_t)); if (!mat_h) goto end; skip_comment(file, &c); /* Read h. */ if (!read_bin_matrix(file, *k, &c, *mat_h, len_h)) goto end; *mat_s = malloc(*k * sizeof(uint8_t)); if (!mat_s) goto end; skip_comment(file, &c); /* Read s. 
*/ if (!read_bin_vector(file, &c, *mat_s, len_s)) goto end; } else { return 0; } ret = 1; end: fclose(file); return ret; } static int parse_input_go(char *filename, size_t *n, size_t *k, size_t *w, uint8_t **mat_h, size_t *len_h, uint8_t **mat_s, size_t *len_s) { int ret = 0; *n = 0; *k = 0; *w = 0; int c; FILE *file; file = fopen(filename, "r"); if (file) { skip_comment(file, &c); /* Read n. */ if (!read_int(file, &c, n)) goto end; skip_comment(file, &c); /* Read k. */ if (!read_int(file, &c, k)) goto end; skip_comment(file, &c); /* Read w. */ if (!read_int(file, &c, w)) goto end; *mat_h = malloc(*n * *k * sizeof(uint8_t)); if (!mat_h) goto end; skip_comment(file, &c); /* Read h. */ if (!read_bin_matrix(file, *k, &c, *mat_h, len_h)) goto end; *mat_s = malloc((*n - *k) * sizeof(uint8_t)); if (!mat_s) goto end; skip_comment(file, &c); /* Read s. */ if (!read_bin_vector(file, &c, *mat_s, len_s)) goto end; } else { return 0; } ret = 1; end: fclose(file); return ret; } static int parse_input_qc(char *filename, size_t *n, size_t *k, size_t *w, uint8_t **mat_h, size_t *len_h, uint8_t **mat_s, size_t *len_s) { int ret = 0; *n = 0; *k = 0; *w = 0; int c; FILE *file; file = fopen(filename, "r"); if (file) { skip_comment(file, &c); /* Read n. */ if (!read_int(file, &c, n)) goto end; *k = *n / 2; skip_comment(file, &c); /* Read w. */ if (!read_int(file, &c, w)) goto end; *mat_h = malloc(*k * sizeof(uint8_t)); if (!mat_h) goto end; skip_comment(file, &c); /* Read h. */ if (!read_bin_vector(file, &c, *mat_h, len_h)) goto end; *mat_s = malloc(*k * sizeof(uint8_t)); if (!mat_s) goto end; skip_comment(file, &c); /* Read s. */ if (!read_bin_vector(file, &c, *mat_s, len_s)) goto end; } else { return 0; } ret = 1; end: fclose(file); return ret; } static int parse_input_lw(char *filename, size_t *n, size_t *k, size_t *w, uint8_t **mat_h, size_t *len_h) { int ret = 0; *n = 0; *k = 0; *w = 0; size_t seed = 0; int c; FILE *file; file = fopen(filename, "r"); if (file) { skip_comment(file, &c); /* Read n. */ if (!read_int(file, &c, n)) goto end; *k = *n / 2; skip_comment(file, &c); /* Read seed. */ if (!read_int(file, &c, &seed)) goto end; *mat_h = malloc(*k * *k * sizeof(uint8_t)); if (!mat_h) goto end; skip_comment(file, &c); /* Read h. 
*/ if (!read_bin_matrix(file, *k, &c, *mat_h, len_h)) goto end; } else { return 0; } ret = 1; end: fclose(file); return ret; } int main(int argc, char *argv[]) { if (argc != 4) { fprintf(stderr, "Usage: %s [N_THREADS] [TYPE] [FILE]\n" "\n" "where TYPE is:\n" " SD for syndrome decoding\n" " QC for quasi-cyclic syndrome decoding\n" " GO for Goppa codes syndrome decoding\n" " LW for low-weight codeword finding\n", argv[0]); exit(EXIT_FAILURE); } enum type current_type; if (!strcmp(argv[2], "QC")) current_type = QC; else if (!strcmp(argv[2], "SD")) current_type = SD; else if (!strcmp(argv[2], "GO")) current_type = GO; else if (!strcmp(argv[2], "LW")) current_type = LW; else { fprintf(stderr, "Check your arguments!\n"); exit(EXIT_FAILURE); } #if !(DUMER_LW) if (current_type == LW) { fprintf(stderr, "No syndrome to decode.\n"); exit(EXIT_FAILURE); } #endif #if DUMER_DOOM if (current_type != QC) { fprintf(stderr, "Using DOOM in a non quasi-cyclic setting will " "most likely not give any meaningful result!\n"); } #endif int n_threads = atoi(argv[1]); if (n_threads < 0) { fprintf(stderr, "N_THREADS should be greater than 0.\n"); exit(EXIT_FAILURE); } size_t n, k, w; uint8_t *mat_h = NULL; uint8_t *mat_s = NULL; size_t len_h, len_s; int parsed; if (current_type == QC) parsed = parse_input_qc(argv[3], &n, &k, &w, &mat_h, &len_h, &mat_s, &len_s); else if (current_type == SD) parsed = parse_input_sd(argv[3], &n, &k, &w, &mat_h, &len_h, &mat_s, &len_s); else if (current_type == GO) parsed = parse_input_go(argv[3], &n, &k, &w, &mat_h, &len_h, &mat_s, &len_s); else if (current_type == LW) parsed = parse_input_lw(argv[3], &n, &k, &w, &mat_h, &len_h); if (!parsed) { fprintf(stderr, "Error parsing file.\n"); exit(EXIT_FAILURE); } size_t r = n - k; printf("n=%ld ", n); printf("k=%ld ", k); printf("w=%ld\n", w); printf("l=%ld ", DUMER_L); printf("p=%ld ", DUMER_P); printf("epsilon=%ld ", DUMER_EPS); printf("doom=%d\n", DUMER_DOOM); /* Birthday decoding */ size_t n1 = (k + DUMER_L) / 2; size_t n2 = k + DUMER_L - n1; if (DUMER_EPS > n2 || DUMER_EPS > n1) { fprintf(stderr, "Please lower DUMER_EPS.\n"); exit(EXIT_FAILURE); } /* Data shared by all threads and computed only once */ shr_t shr = alloc_shr(n1, n2); if (!shr) { fprintf(stderr, "Allocation error.\n"); exit(EXIT_FAILURE); } init_shr(shr, n, k, n1, n2); #if (BENCHMARK) <= 0 #pragma omp parallel num_threads(n_threads) { isd_t isd = alloc_isd(n, k, r, n1, n2, shr->nb_combinations1, shr->k_opt); if (!isd) { fprintf(stderr, "Allocation error.\n"); exit(EXIT_FAILURE); } init_isd(isd, current_type, n, k, w, mat_h, mat_s); while (1) { int found = dumer(n, k, r, n1, n2, shr, isd); if (found) { print_solution(n, isd); #if !(DUMER_LW) exit(EXIT_SUCCESS); #endif } } free_isd(isd, r); } #else isd_t *isd = malloc(n_threads * sizeof(isd_t)); if (!isd) { fprintf(stderr, "Allocation error.\n"); exit(EXIT_FAILURE); } for (int i = 0; i < n_threads; i++) { isd[i] = alloc_isd(n, k, r, n1, n2, shr->nb_combinations1, shr->k_opt); if (!isd[i]) { fprintf(stderr, "Allocation error.\n"); exit(EXIT_FAILURE); } init_isd(isd[i], current_type, n, k, w, mat_h, mat_s); } struct timespec vartime = timer_start(); // begin a timer called 'vartime' #pragma omp parallel num_threads(n_threads) { int i = omp_get_thread_num(); for (size_t N = 0; N < (BENCHMARK + i) / n_threads; ++N) { dumer(n, k, r, n1, n2, shr, isd[i]); } } long time_elapsed_nanos = timer_end(vartime); printf("%ld\n", time_elapsed_nanos); for (int i = 0; i < n_threads; i++) { free_isd(isd[i], r); } #endif if (mat_h) 
free(mat_h); if (mat_s) free(mat_s); free_shr(shr); exit(EXIT_SUCCESS); }
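/* Illustrative sketch, not part of the original isd.c: writing a syndrome
   decoding instance in the layout parse_input_sd() above expects -- a '#'
   comment line before each field, then n, seed and w (one decimal value per
   line), the k = n/2 rows of the binary matrix h, and finally the binary
   syndrome s.  The helper name write_example_sd_instance and any values passed
   to it are hypothetical.  */
static void write_example_sd_instance(FILE *out, size_t n, size_t seed,
                                      size_t w, const uint8_t *h,
                                      const uint8_t *s) {
  size_t k = n / 2;
  fprintf(out, "# n\n%zu\n# seed\n%zu\n# w\n%zu\n# h\n", n, seed, w);
  /* read_bin_matrix() expects k rows of '0'/'1' characters, one row per line. */
  for (size_t i = 0; i < k; ++i) {
    for (size_t j = 0; j < k; ++j)
      fputc('0' + h[i * k + j], out);
    fputc('\n', out);
  }
  /* read_bin_vector() expects a single row of '0'/'1' characters. */
  fprintf(out, "# s\n");
  for (size_t i = 0; i < k; ++i)
    fputc('0' + s[i], out);
  fputc('\n', out);
}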
exact_parallel_minimum_cut.h
/****************************************************************************** * exact_parallel_minimum_cut.h * * Source of VieCut. * ****************************************************************************** * Copyright (C) 2018 Alexander Noe <alexander.noe@univie.ac.at> * * Published under the MIT license in the LICENSE file. *****************************************************************************/ #pragma once #include <algorithm> #include <cstdint> #include <cstdlib> #include <deque> #include <functional> #include <memory> #include <unordered_map> #include <vector> #include "algorithms/global_mincut/minimum_cut_helpers.h" #include "algorithms/global_mincut/noi_minimum_cut.h" #include "algorithms/global_mincut/viecut.h" #include "common/configuration.h" #include "common/definitions.h" #include "data_structure/graph_access.h" #include "data_structure/priority_queues/fifo_node_bucket_pq.h" #include "data_structure/priority_queues/maxNodeHeap.h" #include "data_structure/priority_queues/node_bucket_pq.h" #include "tools/random_functions.h" #include "tools/timer.h" #ifdef PARALLEL #include "parallel/coarsening/contract_graph.h" #include "parallel/coarsening/contraction_tests.h" #include "parallel/coarsening/sparsify.h" #include "parallel/data_structure/union_find.h" #else #include "coarsening/contract_graph.h" #include "coarsening/contraction_tests.h" #include "coarsening/sparsify.h" #include "data_structure/union_find.h" #endif template <class GraphPtr> class exact_parallel_minimum_cut : public minimum_cut { public: typedef GraphPtr GraphPtrType; exact_parallel_minimum_cut() { } ~exact_parallel_minimum_cut() { } static constexpr bool debug = false; bool timing = configuration::getConfig()->verbose; EdgeWeight perform_minimum_cut(GraphPtr G) { return perform_minimum_cut(G, false); } EdgeWeight perform_minimum_cut(GraphPtr G, bool indirect) { if (!G) { return -1; } std::vector<GraphPtr> graphs; timer t; EdgeWeight mincut = G->getMinDegree(); #ifdef PARALLEL viecut<GraphPtr> heuristic_mc; mincut = heuristic_mc.perform_minimum_cut(G, true); LOGC(timing) << "VieCut found cut " << mincut << " [Time: " << t.elapsed() << "s]"; #endif graphs.push_back(G); // if PARALLEL is set, NodeInCut are already set to the result of viecut // This is what we want. 
#ifndef PARALLEL minimum_cut_helpers<GraphPtr>::setInitialCutValues(graphs); #endif while (graphs.back()->number_of_nodes() > 2 && mincut > 0) { GraphPtr curr_g = graphs.back(); timer ts; #ifdef PARALLEL noi_minimum_cut<GraphPtr> noi; auto uf = parallel_modified_capforest(curr_g, mincut); if (uf.n() == curr_g->number_of_nodes()) { uf = noi.modified_capforest(curr_g, mincut); LOGC(timing) << "seq capforest needed"; } #else LOG1 << "Error: Running exact_parallel_minimum_cut without PARALLEL" << " Using normal noi_minimum_cut instead!"; noi_minimum_cut noi; auto uf = noi.modified_capforest(curr_g, mincut); #endif if (uf.n() > 1) { graphs.push_back(contraction::fromUnionFind(curr_g, &uf, true)); mincut = minimum_cut_helpers<GraphPtr>::updateCut( graphs, mincut); } else { break; } } if (!indirect && configuration::getConfig()->save_cut) minimum_cut_helpers<GraphPtr>::retrieveMinimumCut(graphs); return mincut; } std::vector<NodeID> randomStartNodes(GraphPtr G) { std::vector<NodeID> start_nodes; for (int i = 0; i < omp_get_max_threads(); ++i) start_nodes.push_back( random_functions::next() % G->number_of_nodes()); return start_nodes; } std::vector<NodeID> bfsStartNodes(GraphPtr G) { NodeID starting_node = random_functions::next() % G->number_of_nodes(); std::vector<NodeID> start_nodes; start_nodes.push_back(starting_node); for (int i = 1; i < omp_get_max_threads(); ++i) { std::deque<NodeID> bfs; std::vector<bool> nodes(G->number_of_nodes(), false); size_t found = i; for (auto el : start_nodes) { bfs.push_back(el); nodes[el] = true; } while (!bfs.empty() && found < G->number_of_nodes()) { NodeID no = bfs.front(); bfs.pop_front(); for (EdgeID e : G->edges_of(no)) { NodeID tgt = G->getEdgeTarget(e); if (!nodes[tgt]) { found++; nodes[tgt] = true; bfs.push_back(tgt); if (found == G->number_of_nodes()) { start_nodes.push_back(tgt); break; } } } } } return start_nodes; } union_find parallel_modified_capforest( GraphPtr G, const EdgeWeight mincut, const bool disable_blacklist = false) { union_find uf(G->number_of_nodes()); timer t; timer timer2; std::vector<NodeID> start_nodes = randomStartNodes(G); // std::vector<bool> would be bad for thread-safety std::vector<uint8_t> visited(G->number_of_nodes(), false); std::vector<size_t> times(G->number_of_nodes(), 0); #pragma omp parallel for for (int i = 0; i < omp_get_num_threads(); ++i) { fifo_node_bucket_pq pq(G->number_of_nodes(), mincut + 1); std::vector<bool> blacklisted(G->number_of_nodes(), false); std::vector<NodeID> r_v(G->number_of_nodes(), 0); std::vector<bool> local_visited(G->number_of_nodes(), false); NodeID starting_node = start_nodes[i]; NodeID current_node = starting_node; pq.insert(current_node, 0); timer t; size_t elements = 0; while (!pq.empty()) { current_node = pq.deleteMax(); elements++; local_visited[current_node] = true; if (!disable_blacklist) { blacklisted[current_node] = true; if (visited[current_node]) { continue; } else { visited[current_node] = true; } } for (EdgeID e : G->edges_of(current_node)) { auto [tgt, wgt] = G->getEdge(current_node, e); if (!local_visited[tgt]) { if (r_v[tgt] < mincut) { if ((r_v[tgt] + wgt) >= mincut) { if (!blacklisted[tgt]) { uf.Union(current_node, tgt); } } if (!visited[tgt]) { size_t new_rv = std::min(r_v[tgt] + wgt, mincut); r_v[tgt] = new_rv; if (!visited[tgt] && !local_visited[tgt]) { if (pq.contains(tgt)) { pq.increaseKey(tgt, new_rv); } else { pq.insert(tgt, new_rv); } } } } } } } } return uf; } };
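// Illustrative usage sketch, not part of the original VieCut header.  It
// assumes the GraphPtr template argument is std::shared_ptr<graph_access>
// (graph_access.h and <memory> are already included above) and that the graph
// was built elsewhere; the helper name run_exact_parallel_mincut is
// hypothetical.
static inline EdgeWeight run_exact_parallel_mincut(
    std::shared_ptr<graph_access> G) {
    exact_parallel_minimum_cut<std::shared_ptr<graph_access> > mc;
    // perform_minimum_cut() returns the value of a global minimum cut of G.
    // When configuration::getConfig()->save_cut is set it also calls
    // minimum_cut_helpers<GraphPtr>::retrieveMinimumCut() so the cut itself
    // can be read back afterwards.
    return mc.perform_minimum_cut(G);
}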
dataset.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_DATASET_H_ #define LIGHTGBM_DATASET_H_ #include <LightGBM/config.h> #include <LightGBM/feature_group.h> #include <LightGBM/meta.h> #include <LightGBM/utils/openmp_wrapper.h> #include <LightGBM/utils/random.h> #include <LightGBM/utils/text_reader.h> #include <string> #include <functional> #include <memory> #include <mutex> #include <unordered_set> #include <utility> #include <vector> namespace LightGBM { /*! \brief forward declaration */ class DatasetLoader; /*! * \brief This class is used to store some meta(non-feature) data for training data, * e.g. labels, weights, initial scores, query level informations. * * Some details: * 1. Label, used for training. * 2. Weights, weighs of records, optional * 3. Query Boundaries, necessary for lambdarank. * The documents of i-th query is in [ query_boundaries[i], query_boundaries[i+1] ) * 4. Query Weights, auto calculate by weights and query_boundaries(if both of them are existed) * the weight for i-th query is sum(query_boundaries[i] , .., query_boundaries[i+1]) / (query_boundaries[i + 1] - query_boundaries[i+1]) * 5. Initial score. optional. if existing, the model will boost from this score, otherwise will start from 0. */ class Metadata { public: /*! * \brief Null constructor */ Metadata(); /*! * \brief Initialization will load query level informations, since it is need for sampling data * \param data_filename Filename of data */ void Init(const char* data_filename); /*! * \brief init as subset * \param metadata Filename of data * \param used_indices * \param num_used_indices */ void Init(const Metadata& metadata, const data_size_t* used_indices, data_size_t num_used_indices); /*! * \brief Initial with binary memory * \param memory Pointer to memory */ void LoadFromMemory(const void* memory); /*! \brief Destructor */ ~Metadata(); /*! * \brief Initial work, will allocate space for label, weight(if exists) and query(if exists) * \param num_data Number of training data * \param weight_idx Index of weight column, < 0 means doesn't exists * \param query_idx Index of query id column, < 0 means doesn't exists */ void Init(data_size_t num_data, int weight_idx, int query_idx); /*! * \brief Partition label by used indices * \param used_indices Indices of local used */ void PartitionLabel(const std::vector<data_size_t>& used_indices); /*! * \brief Partition meta data according to local used indices if need * \param num_all_data Number of total training data, including other machines' data on parallel learning * \param used_data_indices Indices of local used training data */ void CheckOrPartition(data_size_t num_all_data, const std::vector<data_size_t>& used_data_indices); void SetLabel(const label_t* label, data_size_t len); void SetWeights(const label_t* weights, data_size_t len); void SetQuery(const data_size_t* query, data_size_t len); /*! * \brief Set initial scores * \param init_score Initial scores, this class will manage memory for init_score. */ void SetInitScore(const double* init_score, data_size_t len); /*! * \brief Save binary data to file * \param file File want to write */ void SaveBinaryToFile(const VirtualFileWriter* writer) const; /*! * \brief Get sizes in byte of this object */ size_t SizesInByte() const; /*! * \brief Get pointer of label * \return Pointer of label */ inline const label_t* label() const { return label_.data(); } /*! 
* \brief Set label for one record * \param idx Index of this record * \param value Label value of this record */ inline void SetLabelAt(data_size_t idx, label_t value) { label_[idx] = value; } /*! * \brief Set Weight for one record * \param idx Index of this record * \param value Weight value of this record */ inline void SetWeightAt(data_size_t idx, label_t value) { weights_[idx] = value; } /*! * \brief Set Query Id for one record * \param idx Index of this record * \param value Query Id value of this record */ inline void SetQueryAt(data_size_t idx, data_size_t value) { queries_[idx] = static_cast<data_size_t>(value); } /*! * \brief Get weights, if not exists, will return nullptr * \return Pointer of weights */ inline const label_t* weights() const { if (!weights_.empty()) { return weights_.data(); } else { return nullptr; } } /*! * \brief Get data boundaries on queries, if not exists, will return nullptr * we assume data will order by query, * the interval of [query_boundaris[i], query_boundaris[i+1]) * is the data indices for query i. * \return Pointer of data boundaries on queries */ inline const data_size_t* query_boundaries() const { if (!query_boundaries_.empty()) { return query_boundaries_.data(); } else { return nullptr; } } /*! * \brief Get Number of queries * \return Number of queries */ inline data_size_t num_queries() const { return num_queries_; } /*! * \brief Get weights for queries, if not exists, will return nullptr * \return Pointer of weights for queries */ inline const label_t* query_weights() const { if (!query_weights_.empty()) { return query_weights_.data(); } else { return nullptr; } } /*! * \brief Get initial scores, if not exists, will return nullptr * \return Pointer of initial scores */ inline const double* init_score() const { if (!init_score_.empty()) { return init_score_.data(); } else { return nullptr; } } /*! * \brief Get size of initial scores */ inline int64_t num_init_score() const { return num_init_score_; } /*! \brief Disable copy */ Metadata& operator=(const Metadata&) = delete; /*! \brief Disable copy */ Metadata(const Metadata&) = delete; private: /*! \brief Load initial scores from file */ void LoadInitialScore(); /*! \brief Load wights from file */ void LoadWeights(); /*! \brief Load query boundaries from file */ void LoadQueryBoundaries(); /*! \brief Load query wights */ void LoadQueryWeights(); /*! \brief Filename of current data */ std::string data_filename_; /*! \brief Number of data */ data_size_t num_data_; /*! \brief Number of weights, used to check correct weight file */ data_size_t num_weights_; /*! \brief Label data */ std::vector<label_t> label_; /*! \brief Weights data */ std::vector<label_t> weights_; /*! \brief Query boundaries */ std::vector<data_size_t> query_boundaries_; /*! \brief Query weights */ std::vector<label_t> query_weights_; /*! \brief Number of querys */ data_size_t num_queries_; /*! \brief Number of Initial score, used to check correct weight file */ int64_t num_init_score_; /*! \brief Initial score */ std::vector<double> init_score_; /*! \brief Queries data */ std::vector<data_size_t> queries_; /*! \brief mutex for threading safe call */ std::mutex mutex_; bool weight_load_from_file_; bool query_load_from_file_; bool init_score_load_from_file_; }; /*! \brief Interface for Parser */ class Parser { public: /*! \brief virtual destructor */ virtual ~Parser() {} /*! 
* \brief Parse one line with label * \param str One line record, string format, should end with '\0' * \param out_features Output columns, store in (column_idx, values) * \param out_label Label will store to this if exists */ virtual void ParseOneLine(const char* str, std::vector<std::pair<int, double>>* out_features, double* out_label) const = 0; virtual int NumFeatures() const = 0; /*! * \brief Create an object of parser, will auto choose the format depend on file * \param filename One Filename of data * \param num_features Pass num_features of this data file if you know, <=0 means don't know * \param label_idx index of label column * \return Object of parser */ static Parser* CreateParser(const char* filename, bool header, int num_features, int label_idx); }; struct TrainingShareStates { int num_threads = 0; bool is_colwise = true; bool is_use_subcol = false; bool is_use_subrow = false; bool is_subrow_copied = false; bool is_constant_hessian = true; const data_size_t* bagging_use_indices; data_size_t bagging_indices_cnt; int num_bin_aligned; std::unique_ptr<MultiValBin> multi_val_bin; std::unique_ptr<MultiValBin> multi_val_bin_subset; std::vector<uint32_t> hist_move_src; std::vector<uint32_t> hist_move_dest; std::vector<uint32_t> hist_move_size; std::vector<hist_t, Common::AlignmentAllocator<hist_t, kAlignedSize>> hist_buf; void SetMultiValBin(MultiValBin* bin) { num_threads = OMP_NUM_THREADS(); if (bin == nullptr) { return; } multi_val_bin.reset(bin); num_bin_aligned = (bin->num_bin() + kAlignedSize - 1) / kAlignedSize * kAlignedSize; size_t new_size = static_cast<size_t>(num_bin_aligned) * 2 * num_threads; if (new_size > hist_buf.size()) { hist_buf.resize(static_cast<size_t>(num_bin_aligned) * 2 * num_threads); } } hist_t* TempBuf() { if (!is_use_subcol) { return nullptr; } return hist_buf.data() + hist_buf.size() - num_bin_aligned * 2; } void HistMove(const hist_t* src, hist_t* dest) { if (!is_use_subcol) { return; } #pragma omp parallel for schedule(static) for (int i = 0; i < static_cast<int>(hist_move_src.size()); ++i) { std::copy_n(src + hist_move_src[i], hist_move_size[i], dest + hist_move_dest[i]); } } }; /*! \brief The main class of data set, * which are used to training or validation */ class Dataset { public: friend DatasetLoader; LIGHTGBM_EXPORT Dataset(); LIGHTGBM_EXPORT Dataset(data_size_t num_data); void Construct( std::vector<std::unique_ptr<BinMapper>>* bin_mappers, int num_total_features, const std::vector<std::vector<double>>& forced_bins, int** sample_non_zero_indices, double** sample_values, const int* num_per_col, int num_sample_col, size_t total_sample_cnt, const Config& io_config); /*! 
\brief Destructor */ LIGHTGBM_EXPORT ~Dataset(); LIGHTGBM_EXPORT bool CheckAlign(const Dataset& other) const { if (num_features_ != other.num_features_) { return false; } if (num_total_features_ != other.num_total_features_) { return false; } if (label_idx_ != other.label_idx_) { return false; } for (int i = 0; i < num_features_; ++i) { if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) { return false; } } return true; } inline void FinishOneRow(int tid, data_size_t row_idx, const std::vector<bool>& is_feature_added) { if (is_finish_load_) { return; } for (auto fidx : feature_need_push_zeros_) { if (is_feature_added[fidx]) { continue; } const int group = feature2group_[fidx]; const int sub_feature = feature2subfeature_[fidx]; feature_groups_[group]->PushData(tid, sub_feature, row_idx, 0.0f); } } inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<double>& feature_values) { if (is_finish_load_) { return; } for (size_t i = 0; i < feature_values.size() && i < static_cast<size_t>(num_total_features_); ++i) { int feature_idx = used_feature_map_[i]; if (feature_idx >= 0) { const int group = feature2group_[feature_idx]; const int sub_feature = feature2subfeature_[feature_idx]; feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]); } } } inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<std::pair<int, double>>& feature_values) { if (is_finish_load_) { return; } std::vector<bool> is_feature_added(num_features_, false); for (auto& inner_data : feature_values) { if (inner_data.first >= num_total_features_) { continue; } int feature_idx = used_feature_map_[inner_data.first]; if (feature_idx >= 0) { is_feature_added[feature_idx] = true; const int group = feature2group_[feature_idx]; const int sub_feature = feature2subfeature_[feature_idx]; feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second); } } FinishOneRow(tid, row_idx, is_feature_added); } inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) { feature_groups_[group]->PushData(tid, sub_feature, row_idx, value); } inline int RealFeatureIndex(int fidx) const { return real_feature_idx_[fidx]; } inline int InnerFeatureIndex(int col_idx) const { return used_feature_map_[col_idx]; } inline int Feature2Group(int feature_idx) const { return feature2group_[feature_idx]; } inline int Feture2SubFeature(int feature_idx) const { return feature2subfeature_[feature_idx]; } inline uint64_t GroupBinBoundary(int group_idx) const { return group_bin_boundaries_[group_idx]; } inline uint64_t NumTotalBin() const { return group_bin_boundaries_.back(); } inline std::vector<int> ValidFeatureIndices() const { std::vector<int> ret; for (int i = 0; i < num_total_features_; ++i) { if (used_feature_map_[i] >= 0) { ret.push_back(i); } } return ret; } void ReSize(data_size_t num_data); void CopySubrow(const Dataset* fullset, const data_size_t* used_indices, data_size_t num_used_indices, bool need_meta_data); MultiValBin* GetMultiBinFromSparseFeatures() const; MultiValBin* GetMultiBinFromAllFeatures() const; TrainingShareStates* GetShareStates( score_t* gradients, score_t* hessians, const std::vector<int8_t>& is_feature_used, bool is_constant_hessian, bool force_colwise, bool force_rowwise) const; LIGHTGBM_EXPORT void FinishLoad(); LIGHTGBM_EXPORT bool SetFloatField(const char* field_name, const float* field_data, data_size_t num_element); LIGHTGBM_EXPORT bool SetDoubleField(const char* field_name, const double* field_data, 
data_size_t num_element); LIGHTGBM_EXPORT bool SetIntField(const char* field_name, const int* field_data, data_size_t num_element); LIGHTGBM_EXPORT bool GetFloatField(const char* field_name, data_size_t* out_len, const float** out_ptr); LIGHTGBM_EXPORT bool GetDoubleField(const char* field_name, data_size_t* out_len, const double** out_ptr); LIGHTGBM_EXPORT bool GetIntField(const char* field_name, data_size_t* out_len, const int** out_ptr); /*! * \brief Save current dataset into binary file, will save to "filename.bin" */ LIGHTGBM_EXPORT void SaveBinaryFile(const char* bin_filename); LIGHTGBM_EXPORT void DumpTextFile(const char* text_filename); LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset* dataset); LIGHTGBM_EXPORT void CreateValid(const Dataset* dataset); void InitTrain(const std::vector<int8_t>& is_feature_used, TrainingShareStates* share_state) const; template <bool USE_INDICES, bool USE_HESSIAN> void ConstructHistogramsInner(const std::vector<int8_t>& is_feature_used, const data_size_t* data_indices, data_size_t num_data, const score_t* gradients, const score_t* hessians, score_t* ordered_gradients, score_t* ordered_hessians, TrainingShareStates* share_state, hist_t* hist_data) const; template <bool USE_INDICES, bool ORDERED> void ConstructHistogramsMultiVal(const data_size_t* data_indices, data_size_t num_data, const score_t* gradients, const score_t* hessians, TrainingShareStates* share_state, hist_t* hist_data) const; inline void ConstructHistograms( const std::vector<int8_t>& is_feature_used, const data_size_t* data_indices, data_size_t num_data, const score_t* gradients, const score_t* hessians, score_t* ordered_gradients, score_t* ordered_hessians, TrainingShareStates* share_state, hist_t* hist_data) const { if (num_data <= 0) { return; } bool use_indices = data_indices != nullptr && (num_data < num_data_); if (share_state->is_constant_hessian) { if (use_indices) { ConstructHistogramsInner<true, false>( is_feature_used, data_indices, num_data, gradients, hessians, ordered_gradients, ordered_hessians, share_state, hist_data); } else { ConstructHistogramsInner<false, false>( is_feature_used, data_indices, num_data, gradients, hessians, ordered_gradients, ordered_hessians, share_state, hist_data); } } else { if (use_indices) { ConstructHistogramsInner<true, true>( is_feature_used, data_indices, num_data, gradients, hessians, ordered_gradients, ordered_hessians, share_state, hist_data); } else { ConstructHistogramsInner<false, true>( is_feature_used, data_indices, num_data, gradients, hessians, ordered_gradients, ordered_hessians, share_state, hist_data); } } } void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian, hist_t* data) const; inline data_size_t Split(int feature, const uint32_t* threshold, int num_threshold, bool default_left, const data_size_t* data_indices, data_size_t cnt, data_size_t* lte_indices, data_size_t* gt_indices) const { const int group = feature2group_[feature]; const int sub_feature = feature2subfeature_[feature]; return feature_groups_[group]->Split( sub_feature, threshold, num_threshold, default_left, data_indices, cnt, lte_indices, gt_indices); } inline int SubFeatureBinOffset(int i) const { const int sub_feature = feature2subfeature_[i]; if (sub_feature == 0) { return 1; } else { return 0; } } inline int FeatureNumBin(int i) const { const int group = feature2group_[i]; const int sub_feature = feature2subfeature_[i]; return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin(); } inline int FeatureGroupNumBin(int 
group) const { return feature_groups_[group]->num_total_bin_; } inline const BinMapper* FeatureBinMapper(int i) const { const int group = feature2group_[i]; const int sub_feature = feature2subfeature_[i]; return feature_groups_[group]->bin_mappers_[sub_feature].get(); } inline const Bin* FeatureGroupBin(int group) const { return feature_groups_[group]->bin_data_.get(); } inline BinIterator* FeatureIterator(int i) const { const int group = feature2group_[i]; const int sub_feature = feature2subfeature_[i]; return feature_groups_[group]->SubFeatureIterator(sub_feature); } inline BinIterator* FeatureGroupIterator(int group) const { return feature_groups_[group]->FeatureGroupIterator(); } inline bool IsMultiGroup(int i) const { return feature_groups_[i]->is_multi_val_; } inline size_t FeatureGroupSizesInByte(int group) const { return feature_groups_[group]->FeatureGroupSizesInByte(); } inline void* FeatureGroupData(int group) const { return feature_groups_[group]->FeatureGroupData(); } inline double RealThreshold(int i, uint32_t threshold) const { const int group = feature2group_[i]; const int sub_feature = feature2subfeature_[i]; return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold); } // given a real threshold, find the closest threshold bin inline uint32_t BinThreshold(int i, double threshold_double) const { const int group = feature2group_[i]; const int sub_feature = feature2subfeature_[i]; return feature_groups_[group]->bin_mappers_[sub_feature]->ValueToBin(threshold_double); } /*! * \brief Get meta data pointer * \return Pointer of meta data */ inline const Metadata& metadata() const { return metadata_; } /*! \brief Get Number of used features */ inline int num_features() const { return num_features_; } /*! \brief Get Number of feature groups */ inline int num_feature_groups() const { return num_groups_;} /*! \brief Get Number of total features */ inline int num_total_features() const { return num_total_features_; } /*! \brief Get the index of label column */ inline int label_idx() const { return label_idx_; } /*! 
\brief Get names of current data set */ inline const std::vector<std::string>& feature_names() const { return feature_names_; } inline void set_feature_names(const std::vector<std::string>& feature_names) { if (feature_names.size() != static_cast<size_t>(num_total_features_)) { Log::Fatal("Size of feature_names error, should equal with total number of features"); } feature_names_ = std::vector<std::string>(feature_names); std::unordered_set<std::string> feature_name_set; // replace ' ' in feature_names with '_' bool spaceInFeatureName = false; for (auto& feature_name : feature_names_) { // check json if (!Common::CheckAllowedJSON(feature_name)) { Log::Fatal("Do not support special JSON characters in feature name."); } if (feature_name.find(' ') != std::string::npos) { spaceInFeatureName = true; std::replace(feature_name.begin(), feature_name.end(), ' ', '_'); } if (feature_name_set.count(feature_name) > 0) { Log::Fatal("Feature (%s) appears more than one time.", feature_name.c_str()); } feature_name_set.insert(feature_name); } if (spaceInFeatureName) { Log::Warning("Find whitespaces in feature_names, replace with underlines"); } } inline std::vector<std::string> feature_infos() const { std::vector<std::string> bufs; for (int i = 0; i < num_total_features_; ++i) { int fidx = used_feature_map_[i]; if (fidx < 0) { bufs.push_back("none"); } else { const auto bin_mapper = FeatureBinMapper(fidx); bufs.push_back(bin_mapper->bin_info_string()); } } return bufs; } /*! \brief Get Number of data */ inline data_size_t num_data() const { return num_data_; } /*! \brief Disable copy */ Dataset& operator=(const Dataset&) = delete; /*! \brief Disable copy */ Dataset(const Dataset&) = delete; void AddFeaturesFrom(Dataset* other); private: std::string data_filename_; /*! \brief Store used features */ std::vector<std::unique_ptr<FeatureGroup>> feature_groups_; /*! \brief Mapper from real feature index to used index*/ std::vector<int> used_feature_map_; /*! \brief Number of used features*/ int num_features_; /*! \brief Number of total features*/ int num_total_features_; /*! \brief Number of total data*/ data_size_t num_data_; /*! \brief Store some label level data*/ Metadata metadata_; /*! \brief index of label column */ int label_idx_ = 0; /*! \brief store feature names */ std::vector<std::string> feature_names_; /*! \brief store feature names */ static const char* binary_file_token; int num_groups_; std::vector<int> real_feature_idx_; std::vector<int> feature2group_; std::vector<int> feature2subfeature_; std::vector<uint64_t> group_bin_boundaries_; std::vector<int> group_feature_start_; std::vector<int> group_feature_cnt_; bool is_finish_load_; int max_bin_; std::vector<int32_t> max_bin_by_feature_; std::vector<std::vector<double>> forced_bin_bounds_; int bin_construct_sample_cnt_; int min_data_in_bin_; bool use_missing_; bool zero_as_missing_; std::vector<int> feature_need_push_zeros_; }; } // namespace LightGBM #endif // LightGBM_DATA_H_
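The Metadata comment above derives per-query weights from the record weights and the query boundaries. A small C sketch of that aggregation, read as an average of record weights over the interval [query_boundaries[i], query_boundaries[i+1]), is given below; the function name and float types are illustrative and do not reproduce LightGBM's actual LoadQueryWeights.

#include <stddef.h>

/* Sketch: average the record weights inside each query interval
 * [query_boundaries[i], query_boundaries[i+1]).  Illustrative only. */
static void compute_query_weights(const float *weights,
                                  const int *query_boundaries,
                                  int num_queries,
                                  float *query_weights)
{
    for (int i = 0; i < num_queries; ++i) {
        double sum = 0.0;
        int len = query_boundaries[i + 1] - query_boundaries[i];
        for (int j = query_boundaries[i]; j < query_boundaries[i + 1]; ++j)
            sum += weights[j];
        query_weights[i] = (float)(len > 0 ? sum / len : 0.0);
    }
}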
ksdensity3d.c
#include "mex.h" #include <stdlib.h> #include <stdio.h> #include <math.h> #ifdef _OPENMP #include <omp.h> #endif void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { /* inputs */ double *data; double *grid_x; double *grid_y; double *grid_z; double *bandwidth; double *box; double *weight; /* outputs */ double *f; /* working variables */ size_t nstep; size_t nx, ny, nz; double dx, dy, dz; double dgrid_x, dgrid_y, dgrid_z; mwSize dims[3]; size_t i, j, istep; size_t ix, iy, iz; size_t ix_min, ix_max; size_t iy_min, iy_max; size_t iz_min, iz_max; double rx, ry, rz; double *gaussx, *gaussy, *gaussz; double *f_private; size_t is_box; size_t alloc_bandwidth; size_t alloc_weight; size_t *ix_array; size_t *iy_array; size_t *iz_array; size_t ix_count; size_t iy_count; size_t iz_count; /* check inputs and outputs */ if (nrhs < 5) { mexErrMsgTxt("MEX: Not enough input arguments. MEX version requires both grids and bandwidth."); } if (nlhs > 1) { mexErrMsgTxt("MEX: Too many output arguments."); } /* get inputs */ data = mxGetPr(prhs[0]); grid_x = mxGetPr(prhs[1]); grid_y = mxGetPr(prhs[2]); grid_z = mxGetPr(prhs[3]); nstep = mxGetM(prhs[0]); nx = mxGetNumberOfElements(prhs[1]); ny = mxGetNumberOfElements(prhs[2]); nz = mxGetNumberOfElements(prhs[3]); dims[0] = (mwSize) nx; dims[1] = (mwSize) ny; dims[2] = (mwSize) nz; #ifdef DEBUG mexPrintf("MEX: nstep = %zu\n", nstep); mexPrintf("MEX: nx = %zu\n", nx); mexPrintf("MEX: ny = %zu\n", ny); mexPrintf("MEX: nz = %zu\n", nz); #endif /* setup: bandwidth */ dgrid_x = grid_x[1] - grid_x[0]; dgrid_y = grid_y[1] - grid_y[0]; dgrid_z = grid_z[1] - grid_z[0]; /* setup: bandwidth */ bandwidth = NULL; if (nrhs > 4) { if (mxGetNumberOfElements(prhs[4]) != 0) { alloc_bandwidth = 0; /* not allocated */ bandwidth = mxGetPr(prhs[4]); rx = bandwidth[0]*5.0; ry = bandwidth[1]*5.0; rz = bandwidth[2]*5.0; } } if (bandwidth == NULL) { /* ................TODO................ 
*/ /* alloc_bandwidth = 1; */ /* bandwidth = (double *) malloc(3*sizeof(double)); */ /* mexErrMsgTxt("MEX: In MEX version, please specify bandwidth explicitly"); */ } #ifdef DEBUG mexPrintf("MEX: bandwidth in x-axis: %f\n", bandwidth[0]); mexPrintf("MEX: bandwidth in y-axis: %f\n", bandwidth[1]); mexPrintf("MEX: bandwidth in z-axis: %f\n", bandwidth[2]); #endif /* setup: box */ box = NULL; is_box = 0; if (nrhs > 5) { if (mxGetNumberOfElements(prhs[5]) != 0) { is_box = 1; /* box is turned on */ box = mxGetPr(prhs[5]); } } /* setup: weight */ weight = NULL; if (nrhs > 6) { if (mxGetNumberOfElements(prhs[6]) != 0) { alloc_weight = 0; /* not allocated */ weight = mxGetPr(prhs[6]); } } if (weight == NULL) { alloc_weight = 1; /* allocated */ weight = (double *) malloc(nstep*sizeof(double)); for (istep = 0; istep < nstep; istep++) { weight[istep] = 1.0/(double)nstep; } } /* setup: f */ plhs[0] = mxCreateNumericArray(3, dims, mxDOUBLE_CLASS, mxREAL); f = mxGetPr(plhs[0]); for (ix = 0; ix < nx; ix++) { for (iy = 0; iy < ny; iy++) { for (iz = 0; iz < nz; iz++) { f[iz*ny*nx + iy*nx + ix] = 0.0; } } } /* calculation */ if (is_box) { #pragma omp parallel \ default(none) \ private(istep, ix, ix_min, ix_max, iy, iy_min, iy_max, iz, iz_min, iz_max, dx, dy, dz, gaussx, gaussy, gaussz, f_private, ix_count, iy_count, iz_count, ix_array, iy_array, iz_array) \ shared(nstep, grid_x, grid_y, grid_z, dgrid_x, dgrid_y, dgrid_z, rx, ry, rz, data, bandwidth, weight, f, nx, ny, nz, alloc_bandwidth, alloc_weight, box) { f_private = (double *) malloc(nx*ny*nz*sizeof(double)); gaussx = (double *) malloc(nx*sizeof(double)); gaussy = (double *) malloc(ny*sizeof(double)); gaussz = (double *) malloc(nz*sizeof(double)); ix_array = (size_t *) malloc(nx*sizeof(size_t)); iy_array = (size_t *) malloc(ny*sizeof(size_t)); iz_array = (size_t *) malloc(nz*sizeof(size_t)); for (ix = 0; ix < nx; ix++) { for (iy = 0; iy < ny; iy++) { for (iz = 0; iz < nz; iz++) { f_private[iz*ny*nx + iy*nx + ix] = 0.0; } } } for (ix = 0; ix < nx; ix++) { gaussx[ix] = 0.0; } for (iy = 0; iy < ny; iy++) { gaussy[iy] = 0.0; } for (iz = 0; iz < nz; iz++) { gaussz[iz] = 0.0; } #pragma omp for for (istep = 0; istep < nstep; istep++) { ix_count = 0; for (ix = 0; ix < nx; ix++) { dx = data[istep + nstep*0] - grid_x[ix]; dx = dx - round(dx/box[0])*box[0]; if (fabs(dx) < rx) { dx = dx/bandwidth[0]; gaussx[ix_count] = exp(-0.5*dx*dx)/(sqrt(2*M_PI)*bandwidth[0]); ix_array[ix_count] = ix; ix_count++; } } iy_count = 0; for (iy = 0; iy < ny; iy++) { dy = data[istep + nstep*1] - grid_y[iy]; dy = dy - round(dy/box[1])*box[1]; if (fabs(dy) < ry) { dy = dy/bandwidth[1]; gaussy[iy_count] = exp(-0.5*dy*dy)/(sqrt(2*M_PI)*bandwidth[1]); iy_array[iy_count] = iy; iy_count++; } } iz_count = 0; for (iz = 0; iz < nz; iz++) { dz = data[istep + nstep*2] - grid_z[iz]; dz = dz - round(dz/box[2])*box[2]; if (fabs(dz) < rz) { dz = dz/bandwidth[2]; gaussz[iz_count] = exp(-0.5*dz*dz)/(sqrt(2*M_PI)*bandwidth[2]); iz_array[iz_count] = iz; iz_count++; } } for (ix = 0; ix < ix_count; ix++) { for (iy = 0; iy < iy_count; iy++) { for (iz = 0; iz < iz_count; iz++) { f_private[iz_array[iz]*ny*nx + iy_array[iy]*nx + ix_array[ix]] += weight[istep]*gaussx[ix]*gaussy[iy]*gaussz[iz]; } } } for (ix = 0; ix < nx; ix++) { gaussx[ix] = 0.0; } for (iy = 0; iy < ny; iy++) { gaussy[iy] = 0.0; } for (iz = 0; iz < nz; iz++) { gaussz[iz] = 0.0; } } /* pragma omp for */ #pragma omp critical for (ix = 0; ix < nx; ix++) { for (iy = 0; iy < ny; iy++) { for (iz = 0; iz < nz; iz++) { f[iz*ny*nx + iy*nx + ix] += 
f_private[iz*ny*nx + iy*nx + ix]; } } } if (f_private != NULL) { free(f_private); } if (gaussx != NULL) { free(gaussx); } if (gaussy != NULL) { free(gaussy); } if (gaussz != NULL) { free(gaussz); } if (ix_array != NULL) { free(ix_array); } if (iy_array != NULL) { free(iy_array); } if (iz_array != NULL) { free(iz_array); } } /* pragma omp parallel */ } else { #pragma omp parallel \ default(none) \ private(istep, ix, ix_min, ix_max, iy, iy_min, iy_max, iz, iz_min, iz_max, dx, dy, dz, gaussx, gaussy, gaussz, f_private) \ shared(nstep, grid_x, grid_y, grid_z, dgrid_x, dgrid_y, dgrid_z, rx, ry, rz, data, bandwidth, weight, f, nx, ny, nz, alloc_bandwidth, alloc_weight) { f_private = (double *) malloc(nx*ny*nz*sizeof(double)); gaussx = (double *) malloc(nx*sizeof(double)); gaussy = (double *) malloc(ny*sizeof(double)); gaussz = (double *) malloc(nz*sizeof(double)); for (ix = 0; ix < nx; ix++) { for (iy = 0; iy < ny; iy++) { for (iz = 0; iz < nz; iz++) { f_private[iz*ny*nx + iy*nx + ix] = 0.0; } } } for (ix = 0; ix < nx; ix++) { gaussx[ix] = 0.0; } for (iy = 0; iy < ny; iy++) { gaussy[iy] = 0.0; } for (iz = 0; iz < nz; iz++) { gaussz[iz] = 0.0; } #pragma omp for for (istep = 0; istep < nstep; istep++) { dx = data[istep + nstep*0] - grid_x[0]; ix_min = (size_t) ((dx - rx)/dgrid_x); ix_min = ix_min > 0 ? ix_min : 0; ix_max = ((size_t) ((dx + rx)/dgrid_x)) + 1; ix_max = ix_max < nx ? ix_max : nx; /* mexPrintf("MEX: ix_min = %zu\n", ix_min); */ /* mexPrintf("MEX: ix_max = %zu\n", ix_max); */ for (ix = ix_min; ix < ix_max; ix++) { dx = (grid_x[ix] - data[istep + nstep*0])/bandwidth[0]; gaussx[ix] = exp(-0.5*dx*dx)/(sqrt(2*M_PI)*bandwidth[0]); } dy = data[istep + nstep*1] - grid_y[0]; iy_min = (size_t) ((dy - ry)/dgrid_y); iy_min = iy_min > 0 ? iy_min : 0; iy_max = ((size_t) ((dy + ry)/dgrid_y)) + 1; iy_max = iy_max < ny ? iy_max : ny; for (iy = iy_min; iy < iy_max; iy++) { dy = (grid_y[iy] - data[istep + nstep*1])/bandwidth[1]; gaussy[iy] = exp(-0.5*dy*dy)/(sqrt(2*M_PI)*bandwidth[1]); } dz = data[istep + nstep*2] - grid_z[0]; iz_min = (size_t) ((dz - rz)/dgrid_z); iz_min = iz_min > 0 ? iz_min : 0; iz_max = ((size_t) ((dz + rz)/dgrid_z)) + 1; iz_max = iz_max < nz ? iz_max : nz; for (iz = iz_min; iz < iz_max; iz++) { dz = (grid_z[iz] - data[istep + nstep*2])/bandwidth[2]; gaussz[iz] = exp(-0.5*dz*dz)/(sqrt(2*M_PI)*bandwidth[2]); } for (ix = ix_min; ix < ix_max; ix++) { for (iy = iy_min; iy < iy_max; iy++) { for (iz = iz_min; iz < iz_max; iz++) { f_private[iz*ny*nx + iy*nx + ix] += weight[istep]*gaussx[ix]*gaussy[iy]*gaussz[iz]; } } } for (ix = ix_min; ix < ix_max; ix++) { gaussx[ix] = 0.0; } for (iy = iy_min; iy < iy_max; iy++) { gaussy[iy] = 0.0; } for (iz = iz_min; iz < iz_max; iz++) { gaussz[iz] = 0.0; } } /* pragma omp for */ #pragma omp critical for (ix = 0; ix < nx; ix++) { for (iy = 0; iy < ny; iy++) { for (iz = 0; iz < nz; iz++) { f[iz*ny*nx + iy*nx + ix] += f_private[iz*ny*nx + iy*nx + ix]; } } } if (f_private != NULL) { free(f_private); } if (gaussx != NULL) { free(gaussx); } if (gaussy != NULL) { free(gaussy); } if (gaussz != NULL) { free(gaussz); } } /* pragma omp parallel */ } /* is_box */ if (alloc_bandwidth) { free(bandwidth); } if (alloc_weight) { free(weight); } /* exit(EXIT_SUCCESS); */ }
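The kernel above is separable: each sample contributes a product of three 1-D Gaussians, and only grid points within five bandwidths of the sample are evaluated. The per-axis step looks roughly like the sketch below; names are hypothetical and the periodic-image correction used in the box branch is omitted.

#include <math.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* Fill gauss[] with N(grid[k]; x, h) for the grid points within cutoff*h
 * of x, recording their indices in idx[].  Returns how many were touched. */
static size_t gauss_1d(double x, const double *grid, size_t n, double h,
                       double cutoff, double *gauss, size_t *idx)
{
    size_t count = 0;
    for (size_t k = 0; k < n; k++) {
        double d = x - grid[k];
        if (fabs(d) < cutoff * h) {
            d /= h;
            gauss[count] = exp(-0.5 * d * d) / (sqrt(2.0 * M_PI) * h);
            idx[count++] = k;
        }
    }
    return count;
}

/* The 3-D density is then accumulated as
 * f[iz][iy][ix] += weight * gaussx[ix] * gaussy[iy] * gaussz[iz],
 * exactly as the f_private updates above do. */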
GB_unaryop__lnot_uint64_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__lnot_uint64_uint8
// op(A') function: GB_tran__lnot_uint64_uint8

// C type:  uint64_t
// A type:  uint8_t
// cast:    uint64_t cij = (uint64_t) aij
// unaryop: cij = !(aij != 0)

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x) \
    uint64_t z = (uint64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    /* aij = Ax [pA] */                     \
    GB_GETA (aij, Ax, pA) ;                 \
    /* Cx [pC] = op (cast (aij)) */         \
    GB_CASTING (x, aij) ;                   \
    GB_OP (GB_CX (pC), x) ;                 \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_uint64_uint8
(
    uint64_t *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_uint64_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
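For readers tracing the macro layering, the generated GB_unop__lnot_uint64_uint8 loop expands to roughly the plain-C routine below; this is a sketch of the expansion, not generated code.

#include <stdint.h>

/* Expanded view of GB_CAST_OP for the lnot / uint64 / uint8 case:
 * cast uint8_t -> uint64_t, then apply cij = !(aij != 0). */
void lnot_uint64_uint8_expanded(uint64_t *Cx, const uint8_t *Ax,
                                int64_t anz, int nthreads)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0; p < anz; p++)
    {
        uint8_t  aij = Ax[p];            /* GB_GETA    */
        uint64_t x   = (uint64_t) aij;   /* GB_CASTING */
        Cx[p] = !(x != 0);               /* GB_OP      */
    }
}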
hypre_merge_sort.c
#include "_hypre_utilities.h" #include "hypre_hopscotch_hash.h" #include "../seq_mv/HYPRE_seq_mv.h" //#define DBG_MERGE_SORT #ifdef DBG_MERGE_SORT #include <assert.h> #include <algorithm> #include <unordered_map> #endif #define SWAP(T, a, b) do { T tmp = a; a = b; b = tmp; } while (0) /* union of two sorted (in ascending order) array arr1 and arr2 into arr3 * Assumption: no duplicates in arr1 and arr2 * arr3 should have enough space on entry * map1 and map2 map arr1 and arr2 to arr3 */ void hypre_union2(HYPRE_Int n1, HYPRE_BigInt *arr1, HYPRE_Int n2, HYPRE_BigInt *arr2, HYPRE_Int *n3, HYPRE_BigInt *arr3, HYPRE_Int *map1, HYPRE_Int *map2) { HYPRE_Int i = 0, j = 0, k = 0; while (i < n1 && j < n2) { if (arr1[i] < arr2[j]) { if (map1) { map1[i] = k; } arr3[k++] = arr1[i++]; } else if (arr1[i] > arr2[j]) { if (map2) { map2[j] = k; } arr3[k++] = arr2[j++]; } else /* == */ { if (map1) { map1[i] = k; } if (map2) { map2[j] = k; } arr3[k++] = arr1[i++]; j++; } } while (i < n1) { if (map1) { map1[i] = k; } arr3[k++] = arr1[i++]; } while (j < n2) { if (map2) { map2[j] = k; } arr3[k++] = arr2[j++]; } *n3 = k; } static void hypre_merge(HYPRE_Int *first1, HYPRE_Int *last1, HYPRE_Int *first2, HYPRE_Int *last2, HYPRE_Int *out) { for ( ; first1 != last1; ++out) { if (first2 == last2) { for ( ; first1 != last1; ++first1, ++out) { *out = *first1; } return; } if (*first2 < *first1) { *out = *first2; ++first2; } else { *out = *first1; ++first1; } } for ( ; first2 != last2; ++first2, ++out) { *out = *first2; } } #ifdef HYPRE_CONCURRENT_HOPSCOTCH static void hypre_big_merge(HYPRE_BigInt *first1, HYPRE_BigInt *last1, HYPRE_BigInt *first2, HYPRE_BigInt *last2, HYPRE_BigInt *out) { for ( ; first1 != last1; ++out) { if (first2 == last2) { for ( ; first1 != last1; ++first1, ++out) { *out = *first1; } return; } if (*first2 < *first1) { *out = *first2; ++first2; } else { *out = *first1; ++first1; } } for ( ; first2 != last2; ++first2, ++out) { *out = *first2; } } #endif static void kth_element_( HYPRE_Int *out1, HYPRE_Int *out2, HYPRE_Int *a1, HYPRE_Int *a2, HYPRE_Int left, HYPRE_Int right, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k) { while (1) { HYPRE_Int i = (left + right)/2; // right < k -> i < k HYPRE_Int j = k - i - 1; #ifdef DBG_MERGE_SORT assert(left <= right && right <= k); assert(i < k); // i == k implies left == right == k that can never happen assert(j >= 0 && j < n2); #endif if ((j == -1 || a1[i] >= a2[j]) && (j == n2 - 1 || a1[i] <= a2[j + 1])) { *out1 = i; *out2 = j + 1; return; } else if (j >= 0 && a2[j] >= a1[i] && (i == n1 - 1 || a2[j] <= a1[i + 1])) { *out1 = i + 1; *out2 = j; return; } else if (a1[i] > a2[j] && j != n2 - 1 && a1[i] > a2[j+1]) { // search in left half of a1 right = i - 1; } else { // search in right half of a1 left = i + 1; } } } /** * Partition the input so that * a1[0:*out1) and a2[0:*out2) contain the smallest k elements */ static void kth_element( HYPRE_Int *out1, HYPRE_Int *out2, HYPRE_Int *a1, HYPRE_Int *a2, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k) { // either of the inputs is empty if (n1 == 0) { *out1 = 0; *out2 = k; return; } if (n2 == 0) { *out1 = k; *out2 = 0; return; } if (k >= n1 + n2) { *out1 = n1; *out2 = n2; return; } // one is greater than the other if (k < n1 && a1[k] <= a2[0]) { *out1 = k; *out2 = 0; return; } if (k - n1 >= 0 && a2[k - n1] >= a1[n1 - 1]) { *out1 = n1; *out2 = k - n1; return; } if (k < n2 && a2[k] <= a1[0]) { *out1 = 0; *out2 = k; return; } if (k - n2 >= 0 && a1[k - n2] >= a2[n2 - 1]) { *out1 = k - n2; *out2 = n2; return; } // now k > 0 // faster to do 
binary search on the shorter sequence if (n1 > n2) { SWAP(HYPRE_Int, n1, n2); SWAP(HYPRE_Int *, a1, a2); SWAP(HYPRE_Int *, out1, out2); } if (k < (n1 + n2)/2) { kth_element_(out1, out2, a1, a2, 0, hypre_min(n1 - 1, k), n1, n2, k); } else { // when k is big, faster to find (n1 + n2 - k)th biggest element HYPRE_Int offset1 = hypre_max(k - n2, 0), offset2 = hypre_max(k - n1, 0); HYPRE_Int new_k = k - offset1 - offset2; HYPRE_Int new_n1 = hypre_min(n1 - offset1, new_k + 1); HYPRE_Int new_n2 = hypre_min(n2 - offset2, new_k + 1); kth_element_(out1, out2, a1 + offset1, a2 + offset2, 0, new_n1 - 1, new_n1, new_n2, new_k); *out1 += offset1; *out2 += offset2; } #ifdef DBG_MERGE_SORT assert(*out1 + *out2 == k); #endif } #ifdef HYPRE_CONCURRENT_HOPSCOTCH static void big_kth_element_( HYPRE_Int *out1, HYPRE_Int *out2, HYPRE_BigInt *a1, HYPRE_BigInt *a2, HYPRE_Int left, HYPRE_Int right, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k) { while (1) { HYPRE_Int i = (left + right)/2; // right < k -> i < k HYPRE_Int j = k - i - 1; #ifdef DBG_MERGE_SORT assert(left <= right && right <= k); assert(i < k); // i == k implies left == right == k that can never happen assert(j >= 0 && j < n2); #endif if ((j == -1 || a1[i] >= a2[j]) && (j == n2 - 1 || a1[i] <= a2[j + 1])) { *out1 = i; *out2 = j + 1; return; } else if (j >= 0 && a2[j] >= a1[i] && (i == n1 - 1 || a2[j] <= a1[i + 1])) { *out1 = i + 1; *out2 = j; return; } else if (a1[i] > a2[j] && j != n2 - 1 && a1[i] > a2[j+1]) { // search in left half of a1 right = i - 1; } else { // search in right half of a1 left = i + 1; } } } /** * Partition the input so that * a1[0:*out1) and a2[0:*out2) contain the smallest k elements */ static void big_kth_element( HYPRE_Int *out1, HYPRE_Int *out2, HYPRE_BigInt *a1, HYPRE_BigInt *a2, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k) { // either of the inputs is empty if (n1 == 0) { *out1 = 0; *out2 = k; return; } if (n2 == 0) { *out1 = k; *out2 = 0; return; } if (k >= n1 + n2) { *out1 = n1; *out2 = n2; return; } // one is greater than the other if (k < n1 && a1[k] <= a2[0]) { *out1 = k; *out2 = 0; return; } if (k - n1 >= 0 && a2[k - n1] >= a1[n1 - 1]) { *out1 = n1; *out2 = k - n1; return; } if (k < n2 && a2[k] <= a1[0]) { *out1 = 0; *out2 = k; return; } if (k - n2 >= 0 && a1[k - n2] >= a2[n2 - 1]) { *out1 = k - n2; *out2 = n2; return; } // now k > 0 // faster to do binary search on the shorter sequence if (n1 > n2) { SWAP(HYPRE_Int, n1, n2); SWAP(HYPRE_BigInt *, a1, a2); SWAP(HYPRE_Int *, out1, out2); } if (k < (n1 + n2)/2) { big_kth_element_(out1, out2, a1, a2, 0, hypre_min(n1 - 1, k), n1, n2, k); } else { // when k is big, faster to find (n1 + n2 - k)th biggest element HYPRE_Int offset1 = hypre_max(k - n2, 0), offset2 = hypre_max(k - n1, 0); HYPRE_Int new_k = k - offset1 - offset2; HYPRE_Int new_n1 = hypre_min(n1 - offset1, new_k + 1); HYPRE_Int new_n2 = hypre_min(n2 - offset2, new_k + 1); big_kth_element_(out1, out2, a1 + (HYPRE_BigInt)offset1, a2 + (HYPRE_BigInt)offset2, 0, new_n1 - 1, new_n1, new_n2, new_k); *out1 += offset1; *out2 += offset2; } #ifdef DBG_MERGE_SORT assert(*out1 + *out2 == k); #endif } #endif /** * @param num_threads number of threads that participate in this merge * @param my_thread_num thread id (zeor-based) among the threads that participate in this merge */ static void hypre_parallel_merge( HYPRE_Int *first1, HYPRE_Int *last1, HYPRE_Int *first2, HYPRE_Int *last2, HYPRE_Int *out, HYPRE_Int num_threads, HYPRE_Int my_thread_num) { HYPRE_Int n1 = last1 - first1; HYPRE_Int n2 = last2 - first2; HYPRE_Int n = n1 + n2; 
HYPRE_Int n_per_thread = (n + num_threads - 1)/num_threads; HYPRE_Int begin_rank = hypre_min(n_per_thread*my_thread_num, n); HYPRE_Int end_rank = hypre_min(begin_rank + n_per_thread, n); #ifdef DBG_MERGE_SORT assert(std::is_sorted(first1, last1)); assert(std::is_sorted(first2, last2)); #endif HYPRE_Int begin1, begin2, end1, end2; kth_element(&begin1, &begin2, first1, first2, n1, n2, begin_rank); kth_element(&end1, &end2, first1, first2, n1, n2, end_rank); while (begin1 > end1 && begin1 > 0 && begin2 < n2 && first1[begin1 - 1] == first2[begin2]) { #ifdef DBG_MERGE_SORT printf("%s:%d\n", __FILE__, __LINE__); #endif begin1--; begin2++; } while (begin2 > end2 && end1 > 0 && end2 < n2 && first1[end1 - 1] == first2[end2]) { #ifdef DBG_MERGE_SORT printf("%s:%d\n", __FILE__, __LINE__); #endif end1--; end2++; } #ifdef DBG_MERGE_SORT assert(begin1 <= end1); assert(begin2 <= end2); #endif hypre_merge( first1 + begin1, first1 + end1, first2 + begin2, first2 + end2, out + begin1 + begin2); #ifdef DBG_MERGE_SORT assert(std::is_sorted(out + begin1 + begin2, out + end1 + end2)); #endif } #ifdef HYPRE_CONCURRENT_HOPSCOTCH /** * @param num_threads number of threads that participate in this merge * @param my_thread_num thread id (zeor-based) among the threads that participate in this merge */ static void hypre_big_parallel_merge( HYPRE_BigInt *first1, HYPRE_BigInt *last1, HYPRE_BigInt *first2, HYPRE_BigInt *last2, HYPRE_BigInt *out, HYPRE_Int num_threads, HYPRE_Int my_thread_num) { HYPRE_Int n1 = (HYPRE_Int)(last1 - first1); HYPRE_Int n2 = (HYPRE_Int)(last2 - first2); HYPRE_Int n = n1 + n2; HYPRE_Int n_per_thread = (n + num_threads - 1)/num_threads; HYPRE_Int begin_rank = hypre_min(n_per_thread*my_thread_num, n); HYPRE_Int end_rank = hypre_min(begin_rank + n_per_thread, n); #ifdef DBG_MERGE_SORT assert(std::is_sorted(first1, last1)); assert(std::is_sorted(first2, last2)); #endif HYPRE_Int begin1, begin2, end1, end2; big_kth_element(&begin1, &begin2, first1, first2, n1, n2, begin_rank); big_kth_element(&end1, &end2, first1, first2, n1, n2, end_rank); while (begin1 > end1 && begin1 > 0 && begin2 < n2 && first1[begin1 - 1] == first2[begin2]) { #ifdef DBG_MERGE_SORT printf("%s:%d\n", __FILE__, __LINE__); #endif begin1--; begin2++; } while (begin2 > end2 && end1 > 0 && end2 < n2 && first1[end1 - 1] == first2[end2]) { #ifdef DBG_MERGE_SORT printf("%s:%d\n", __FILE__, __LINE__); #endif end1--; end2++; } #ifdef DBG_MERGE_SORT assert(begin1 <= end1); assert(begin2 <= end2); #endif hypre_big_merge( first1 + (HYPRE_BigInt)begin1, first1 + (HYPRE_BigInt)end1, first2 + (HYPRE_BigInt)begin2, first2 + (HYPRE_BigInt)end2, out + (HYPRE_BigInt)(begin1 + begin2)); #ifdef DBG_MERGE_SORT assert(std::is_sorted(out + begin1 + begin2, out + end1 + end2)); #endif } #endif void hypre_merge_sort(HYPRE_Int *in, HYPRE_Int *temp, HYPRE_Int len, HYPRE_Int **out) { if (0 == len) return; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime(); #endif #ifdef DBG_MERGE_SORT HYPRE_Int *dbg_buf = new HYPRE_Int[len]; std::copy(in, in + len, dbg_buf); std::sort(dbg_buf, dbg_buf + len); #endif // HYPRE_Int thread_private_len[hypre_NumThreads()]; // HYPRE_Int out_len = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int num_threads = hypre_NumActiveThreads(); HYPRE_Int my_thread_num = hypre_GetThreadNum(); // thread-private sort HYPRE_Int i_per_thread = (len + num_threads - 1)/num_threads; HYPRE_Int i_begin = hypre_min(i_per_thread*my_thread_num, len); HYPRE_Int i_end = hypre_min(i_begin + i_per_thread, 
len); hypre_qsort0(in, i_begin, i_end - 1); // merge sorted sequences HYPRE_Int in_group_size; HYPRE_Int *in_buf = in; HYPRE_Int *out_buf = temp; for (in_group_size = 1; in_group_size < num_threads; in_group_size *= 2) { #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif // merge 2 in-groups into 1 out-group HYPRE_Int out_group_size = in_group_size*2; HYPRE_Int group_leader = my_thread_num/out_group_size*out_group_size; // HYPRE_Int group_sub_leader = hypre_min(group_leader + in_group_size, num_threads - 1); HYPRE_Int id_in_group = my_thread_num%out_group_size; HYPRE_Int num_threads_in_group = hypre_min(group_leader + out_group_size, num_threads) - group_leader; HYPRE_Int in_group1_begin = hypre_min(i_per_thread*group_leader, len); HYPRE_Int in_group1_end = hypre_min(in_group1_begin + i_per_thread*in_group_size, len); HYPRE_Int in_group2_begin = hypre_min(in_group1_begin + i_per_thread*in_group_size, len); HYPRE_Int in_group2_end = hypre_min(in_group2_begin + i_per_thread*in_group_size, len); hypre_parallel_merge( in_buf + in_group1_begin, in_buf + in_group1_end, in_buf + in_group2_begin, in_buf + in_group2_end, out_buf + in_group1_begin, num_threads_in_group, id_in_group); HYPRE_Int *temp = in_buf; in_buf = out_buf; out_buf = temp; } *out = in_buf; } /* omp parallel */ #ifdef DBG_MERGE_SORT assert(std::equal(*out, *out + len, dbg_buf)); delete[] dbg_buf; #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime(); #endif } #ifdef HYPRE_CONCURRENT_HOPSCOTCH void hypre_sort_and_create_inverse_map( HYPRE_Int *in, HYPRE_Int len, HYPRE_Int **out, hypre_UnorderedIntMap *inverse_map) { if (len == 0) { return; } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime(); #endif HYPRE_Int *temp = hypre_TAlloc(HYPRE_Int, len, HYPRE_MEMORY_HOST); hypre_merge_sort(in, temp, len, out); hypre_UnorderedIntMapCreate(inverse_map, 2*len, 16*hypre_NumThreads()); HYPRE_Int i; #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i = 0; i < len; i++) { HYPRE_Int old = hypre_UnorderedIntMapPutIfAbsent(inverse_map, (*out)[i], i); assert(old == HYPRE_HOPSCOTCH_HASH_EMPTY); #ifdef DBG_MERGE_SORT if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i) { fprintf(stderr, "%d %d\n", i, (*out)[i]); assert(false); } #endif } #ifdef DBG_MERGE_SORT std::unordered_map<HYPRE_Int, HYPRE_Int> inverse_map2(len); for (HYPRE_Int i = 0; i < len; ++i) { inverse_map2[(*out)[i]] = i; if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i) { fprintf(stderr, "%d %d\n", i, (*out)[i]); assert(false); } } assert(hypre_UnorderedIntMapSize(inverse_map) == len); #endif if (*out == in) { hypre_TFree(temp, HYPRE_MEMORY_HOST); } else { hypre_TFree(in, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime(); #endif } #ifdef HYPRE_CONCURRENT_HOPSCOTCH void hypre_big_merge_sort(HYPRE_BigInt *in, HYPRE_BigInt *temp, HYPRE_Int len, HYPRE_BigInt **out) { if (0 == len) return; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime(); #endif #ifdef DBG_MERGE_SORT HYPRE_Int *dbg_buf = new HYPRE_Int[len]; std::copy(in, in + len, dbg_buf); std::sort(dbg_buf, dbg_buf + len); #endif // HYPRE_Int thread_private_len[hypre_NumThreads()]; // HYPRE_Int out_len = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int num_threads = hypre_NumActiveThreads(); HYPRE_Int my_thread_num = hypre_GetThreadNum(); // thread-private sort HYPRE_Int i_per_thread = (len + num_threads - 1)/num_threads; HYPRE_Int i_begin = 
hypre_min(i_per_thread*my_thread_num, len); HYPRE_Int i_end = hypre_min(i_begin + i_per_thread, len); hypre_BigQsort0(in, i_begin, i_end - 1); // merge sorted sequences HYPRE_Int in_group_size; HYPRE_BigInt *in_buf = in; HYPRE_BigInt *out_buf = temp; for (in_group_size = 1; in_group_size < num_threads; in_group_size *= 2) { #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif // merge 2 in-groups into 1 out-group HYPRE_Int out_group_size = in_group_size*2; HYPRE_Int group_leader = my_thread_num/out_group_size*out_group_size; // HYPRE_Int group_sub_leader = hypre_min(group_leader + in_group_size, num_threads - 1); HYPRE_Int id_in_group = my_thread_num%out_group_size; HYPRE_Int num_threads_in_group = hypre_min(group_leader + out_group_size, num_threads) - group_leader; HYPRE_Int in_group1_begin = hypre_min(i_per_thread*group_leader, len); HYPRE_Int in_group1_end = hypre_min(in_group1_begin + i_per_thread*in_group_size, len); HYPRE_Int in_group2_begin = hypre_min(in_group1_begin + i_per_thread*in_group_size, len); HYPRE_Int in_group2_end = hypre_min(in_group2_begin + i_per_thread*in_group_size, len); hypre_big_parallel_merge( in_buf + (HYPRE_BigInt)in_group1_begin, in_buf + (HYPRE_BigInt)in_group1_end, in_buf + (HYPRE_BigInt)in_group2_begin, in_buf + (HYPRE_BigInt)in_group2_end, out_buf + (HYPRE_BigInt)in_group1_begin, num_threads_in_group, id_in_group); HYPRE_BigInt *temp = in_buf; in_buf = out_buf; out_buf = temp; } *out = in_buf; } /* omp parallel */ #ifdef DBG_MERGE_SORT assert(std::equal(*out, *out + len, dbg_buf)); delete[] dbg_buf; #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime(); #endif } void hypre_big_sort_and_create_inverse_map( HYPRE_BigInt *in, HYPRE_Int len, HYPRE_BigInt **out, hypre_UnorderedBigIntMap *inverse_map) { if (len == 0) { return; } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime(); #endif HYPRE_BigInt *temp = hypre_TAlloc(HYPRE_BigInt, len, HYPRE_MEMORY_HOST); hypre_big_merge_sort(in, temp, len, out); hypre_UnorderedBigIntMapCreate(inverse_map, 2*len, 16*hypre_NumThreads()); HYPRE_Int i; #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i = 0; i < len; i++) { HYPRE_Int old = hypre_UnorderedBigIntMapPutIfAbsent(inverse_map, (*out)[i], i); assert(old == HYPRE_HOPSCOTCH_HASH_EMPTY); #ifdef DBG_MERGE_SORT if (hypre_UnorderedBigIntMapGet(inverse_map, (*out)[i]) != i) { fprintf(stderr, "%d %d\n", i, (*out)[i]); assert(false); } #endif } #ifdef DBG_MERGE_SORT std::unordered_map<HYPRE_Int, HYPRE_Int> inverse_map2(len); for (HYPRE_Int i = 0; i < len; ++i) { inverse_map2[(*out)[i]] = i; if (hypre_UnorderedBigIntMapGet(inverse_map, (*out)[i]) != i) { fprintf(stderr, "%d %d\n", i, (*out)[i]); assert(false); } } assert(hypre_UnorderedBigIntMapSize(inverse_map) == len); #endif if (*out == in) { hypre_TFree(temp, HYPRE_MEMORY_HOST); } else { hypre_TFree(in, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime(); #endif } #endif #endif /* vim: set tabstop=8 softtabstop=3 sw=3 expandtab: */
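The parallel merge above gives each thread a contiguous range of output ranks and uses kth_element to locate where that range begins in each input; the sequential two-pointer merge it then performs, and the per-thread rank arithmetic, are sketched below with illustrative names.

/* Two-pointer merge of sorted a1[0..n1) and a2[0..n2) into out,
 * taking from a1 on ties -- the same preference hypre_merge uses. */
static void merge_int(const int *a1, int n1, const int *a2, int n2, int *out)
{
    int i = 0, j = 0, k = 0;
    while (i < n1 && j < n2)
        out[k++] = (a2[j] < a1[i]) ? a2[j++] : a1[i++];
    while (i < n1) out[k++] = a1[i++];
    while (j < n2) out[k++] = a2[j++];
}

/* Thread t of num_threads produces output ranks [begin, end). */
static void thread_ranks(int n, int num_threads, int t, int *begin, int *end)
{
    int per = (n + num_threads - 1) / num_threads;
    *begin = per * t;       if (*begin > n) *begin = n;
    *end   = *begin + per;  if (*end   > n) *end   = n;
}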
kernel_cpu.c
/* Copyright (c)2008-2011 University of Virginia All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted without royalty fees or other restrictions, provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the University of Virginia, the Dept. of Computer Science, nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF VIRGINIA OR THE SOFTWARE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifdef __cplusplus extern "C" { #endif //========================================================================================================================================================================================================200 // DEFINE/INCLUDE //========================================================================================================================================================================================================200 //======================================================================================================================================================150 // LIBRARIES //======================================================================================================================================================150 //#include <omp.h> // (in path known to compiler) needed by openmp #include <stdlib.h> // (in path known to compiler) needed by malloc #include <stdio.h> // (in path known to compiler) needed by printf #include <math.h> // (in path known to compiler) needed by exp //======================================================================================================================================================150 // MAIN FUNCTION HEADER //======================================================================================================================================================150 #include "./../main.h" // (in the main program folder) needed to recognized input variables //======================================================================================================================================================150 // UTILITIES //======================================================================================================================================================150 #include "./../util/timer/timer.h" // (in library path specified to compiler) needed by timer 
//======================================================================================================================================================150 // KERNEL_CPU FUNCTION HEADER //======================================================================================================================================================150 #include "kernel_cpu.h" // (in the current directory) //========================================================================================================================================================================================================200 // PLASMAKERNEL_GPU //========================================================================================================================================================================================================200 void kernel_cpu( par_str par, dim_str dim, box_str* box, FOUR_VECTOR* rv, fp* qv, FOUR_VECTOR* fv) { //======================================================================================================================================================150 // Variables //======================================================================================================================================================150 // parameters fp alpha; fp a2; // counters int i, j, k, l; // home box long first_i; FOUR_VECTOR* rA; FOUR_VECTOR* fA; // neighbor box int pointer; long first_j; FOUR_VECTOR* rB; fp* qB; // common fp r2; fp u2; fp fs; fp vij; fp fxij,fyij,fzij; THREE_VECTOR d; //======================================================================================================================================================150 // MCPU SETUP //======================================================================================================================================================150 //omp_set_num_threads(dim.cores_arg); //======================================================================================================================================================150 // INPUTS //======================================================================================================================================================150 alpha = par.alpha; a2 = 2.0*alpha*alpha; //======================================================================================================================================================150 // PROCESS INTERACTIONS //======================================================================================================================================================150 /* #pragma omp parallel for \ private(i, j, k) \ private(first_i, rA, fA) \ private(pointer, first_j, rB, qB) \ private(r2, u2, fs, vij, fxij, fyij, fzij, d) */ for(l=0; l<dim.number_boxes; l=l+1){ //------------------------------------------------------------------------------------------100 // home box - box parameters //------------------------------------------------------------------------------------------100 first_i = box[l].offset; // offset to common arrays //------------------------------------------------------------------------------------------100 // home box - distance, force, charge and type parameters from common arrays //------------------------------------------------------------------------------------------100 rA = &rv[first_i]; fA = &fv[first_i]; //------------------------------------------------------------------------------------------100 // Do for the # of (home+neighbor) boxes 
//------------------------------------------------------------------------------------------100 for (k=0; k<(1+box[l].nn); k++) { //----------------------------------------50 // neighbor box - get pointer to the right box //----------------------------------------50 if(k==0){ pointer = l; // set first box to be processed to home box } else{ pointer = box[l].nei[k-1].number; // remaining boxes are neighbor boxes } //----------------------------------------50 // neighbor box - box parameters //----------------------------------------50 first_j = box[pointer].offset; //----------------------------------------50 // neighbor box - distance, force, charge and type parameters //----------------------------------------50 rB = &rv[first_j]; qB = &qv[first_j]; //----------------------------------------50 // Do for the # of particles in home box //----------------------------------------50 for (i=0; i<NUMBER_PAR_PER_BOX; i=i+1){ // do for the # of particles in current (home or neighbor) box for (j=0; j<NUMBER_PAR_PER_BOX; j=j+1){ // // coefficients r2 = rA[i].v + rB[j].v - DOT(rA[i],rB[j]); u2 = a2*r2; vij= exp(-u2); fs = 2.*vij; d.x = rA[i].x - rB[j].x; d.y = rA[i].y - rB[j].y; d.z = rA[i].z - rB[j].z; fxij=fs*d.x; fyij=fs*d.y; fzij=fs*d.z; // forces fA[i].v += qB[j]*vij; fA[i].x += qB[j]*fxij; fA[i].y += qB[j]*fyij; fA[i].z += qB[j]*fzij; } // for j } // for i } // for k } // for l /* for (i = 0; i < NUMBER_PAR_PER_BOX; ++i) { fprintf(stderr, "%f %f %f %f\n", fA[i].v, fA[i].x, fA[i].y, fA[i].z); } */ } // main #ifdef __cplusplus } #endif
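The OpenMP pragma in kernel_cpu.c is left commented out; since each home box writes only to its own slice of fv, the outer box loop is a natural candidate for parallelization. A heavily simplified, home-box-only sketch of that pattern follows; the types, names, and reduced force expression are illustrative and are not the Rodinia lavaMD kernel itself.

#include <math.h>

typedef struct { double v, x, y, z; } four_vec;

/* n_boxes independent boxes of m particles each; forces accumulated per box.
 * Parallelizing over boxes mirrors the commented-out pragma above. */
void boxes_parallel(int n_boxes, int m, const four_vec *rv, const double *qv,
                    four_vec *fv, double a2)
{
    #pragma omp parallel for schedule(dynamic)
    for (int l = 0; l < n_boxes; l++) {
        const four_vec *rA = &rv[l * m];
        const double   *qB = &qv[l * m];
        four_vec       *fA = &fv[l * m];
        for (int i = 0; i < m; i++) {
            for (int j = 0; j < m; j++) {
                double dx = rA[i].x - rA[j].x;
                double dy = rA[i].y - rA[j].y;
                double dz = rA[i].z - rA[j].z;
                double r2 = dx * dx + dy * dy + dz * dz; /* simplified distance */
                double vij = exp(-a2 * r2);
                double fs  = 2.0 * vij;
                fA[i].v += qB[j] * vij;
                fA[i].x += qB[j] * fs * dx;
                fA[i].y += qB[j] * fs * dy;
                fA[i].z += qB[j] * fs * dz;
            }
        }
    }
}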
geli_fmt_plug.c
/* * JtR format to crack password protected FreeBSD GELI volumes. * * This software is Copyright (c) 2017, Dhiru Kholia <kholia at kth.se> and it * is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_geli; #elif FMT_REGISTERS_H john_register_one(&fmt_geli); #else #include <string.h> #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 1 #endif #endif #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "hmac_sha.h" #include "aes.h" #include "pbkdf2_hmac_sha512.h" #include "jumbo.h" #include "memdbg.h" #include "geli_common.h" #define FORMAT_LABEL "geli" #define FORMAT_NAME "FreeBSD GELI" #ifdef SIMD_COEF_64 #define ALGORITHM_NAME "PBKDF2-SHA512 " SHA1_ALGORITHM_NAME #else #define ALGORITHM_NAME "PBKDF2-SHA512 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define BINARY_SIZE 0 #define PLAINTEXT_LENGTH 125 #define SALT_SIZE sizeof(*cur_salt) #define BINARY_ALIGN 1 #define SALT_ALIGN sizeof(int) #ifdef SIMD_COEF_64 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA512 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA512 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #if defined (_OPENMP) static int omp_t = 1; #endif static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int *cracked, cracked_count; static custom_salt *cur_salt; static void init(struct fmt_main *self) { #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt); cracked_count = self->params.max_keys_per_crypt; } static void done(void) { MEM_FREE(cracked); MEM_FREE(saved_key); } static void set_salt(void *salt) { cur_salt = (custom_salt *)salt; } static void geli_set_key(char *key, int index) { strnzcpy(saved_key[index], key, sizeof(*saved_key)); } static char *get_key(int index) { return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; memset(cracked, 0, sizeof(cracked[0])*cracked_count); #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { unsigned char master[MAX_KEYS_PER_CRYPT][G_ELI_USERKEYLEN]; unsigned char key[MAX_KEYS_PER_CRYPT][G_ELI_USERKEYLEN]; int i; #ifdef SIMD_COEF_64 int lens[MAX_KEYS_PER_CRYPT]; unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT]; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { lens[i] = strlen(saved_key[index+i]); pin[i] = (unsigned char*)saved_key[index+i]; pout[i] = master[i]; } pbkdf2_sha512_sse((const unsigned char**)pin, lens, cur_salt->md_salt, G_ELI_SALTLEN, cur_salt->md_iterations, pout, G_ELI_USERKEYLEN, 0); #else for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) pbkdf2_sha512((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), cur_salt->md_salt, G_ELI_SALTLEN, cur_salt->md_iterations, master[i], G_ELI_USERKEYLEN, 0); #endif for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { JTR_hmac_sha512((const unsigned char*)"", 0, master[i], G_ELI_USERKEYLEN, key[i], G_ELI_USERKEYLEN); cracked[index+i] = geli_decrypt_verify(cur_salt, key[i]); } } return count; } static int cmp_all(void *binary, int count) { int index; 
for (index = 0; index < count; index++) if (cracked[index]) return 1; return 0; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_geli = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, { FORMAT_TAG }, geli_tests }, { init, done, fmt_default_reset, fmt_default_prepare, geli_common_valid, fmt_default_split, fmt_default_binary, geli_common_get_salt, { geli_common_iteration_count, }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, geli_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
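/*
 * Editorial sketch (not part of the plugin): the per-candidate work done by
 * crypt_all() above, restated sequentially for a single passphrase.  The
 * helper name is hypothetical; the calls, constants and custom_salt fields
 * are the ones this format already uses (pbkdf2_hmac_sha512.h, hmac_sha.h,
 * geli_common.h).  It is kept inside a comment so the plugin stanza above
 * stays self-contained.
 *
 *  static int geli_try_one(custom_salt *cs, char *candidate)
 *  {
 *      unsigned char master[G_ELI_USERKEYLEN], key[G_ELI_USERKEYLEN];
 *
 *      // 1. Stretch the passphrase with PBKDF2-HMAC-SHA512 over the salt
 *      //    and iteration count stored in the GELI metadata.
 *      pbkdf2_sha512((unsigned char *)candidate, strlen(candidate),
 *          cs->md_salt, G_ELI_SALTLEN, cs->md_iterations,
 *          master, G_ELI_USERKEYLEN, 0);
 *
 *      // 2. Derive the user key: HMAC-SHA512 of the stretched key, keyed
 *      //    with the empty string (mirroring the call in crypt_all above).
 *      JTR_hmac_sha512((const unsigned char *)"", 0, master,
 *          G_ELI_USERKEYLEN, key, G_ELI_USERKEYLEN);
 *
 *      // 3. Try to decrypt and verify the metadata master key; success
 *      //    means the candidate passphrase is correct.
 *      return geli_decrypt_verify(cs, key);
 *  }
 */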
effect.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE FFFFF FFFFF EEEEE CCCC TTTTT % % E F F E C T % % EEE FFF FFF EEE C T % % E F F E C T % % EEEEE F F EEEEE CCCC T % % % % % % MagickCore Image Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/morphology.h" #include "MagickCore/morphology-private.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/threshold.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveBlurImage() adaptively blurs the image by blurring less % intensely near image edges and more intensely far from edges. We blur the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and AdaptiveBlurImage() selects a suitable radius for you. 
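%
%  A minimal usage sketch (the parameter values are illustrative, not
%  prescriptive): pass a radius of 0 to let the method choose one from sigma,
%
%    blur_image=AdaptiveBlurImage(image,0.0,1.0,exception);
%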
% % The format of the AdaptiveBlurImage method is: % % Image *AdaptiveBlurImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Laplacian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { #define AdaptiveBlurImageTag "Convolve/Image" #define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma) CacheView *blur_view, *edge_view, *image_view; double normalize, **kernel; Image *blur_image, *edge_image, *gaussian_image; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; size_t width; ssize_t j, k, u, v, y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (fabs(sigma) < MagickEpsilon) return(blur_image); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } /* Edge detect the image brightness channel, level, blur, and level again. */ edge_image=EdgeImage(image,radius,exception); if (edge_image == (Image *) NULL) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } (void) AutoLevelImage(edge_image,exception); gaussian_image=BlurImage(edge_image,radius,sigma,exception); if (gaussian_image != (Image *) NULL) { edge_image=DestroyImage(edge_image); edge_image=gaussian_image; } (void) AutoLevelImage(edge_image,exception); /* Create a set of kernels from maximum (radius,sigma) to minimum. */ width=GetOptimalKernelWidth2D(radius,sigma); kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width, sizeof(*kernel))); if (kernel == (double **) NULL) { edge_image=DestroyImage(edge_image); blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel)); for (i=0; i < (ssize_t) width; i+=2) { kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory( (size_t) (width-i),(width-i)*sizeof(**kernel))); if (kernel[i] == (double *) NULL) break; normalize=0.0; j=(ssize_t) (width-i-1)/2; k=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); normalize+=kernel[i][k]; k++; } } kernel[i][(k-1)/2]+=(double) (1.0-normalize); if (sigma < MagickEpsilon) kernel[i][(k-1)/2]=1.0; } if (i < (ssize_t) width) { for (i-=2; i >= 0; i-=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); edge_image=DestroyImage(edge_image); blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Adaptively blur image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); edge_view=AcquireVirtualCacheView(edge_image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,blur_image,blur_image->rows,1) #endif for (y=0; y < (ssize_t) blur_image->rows; y++) { register const Quantum *magick_restrict r; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) blur_image->columns; x++) { register const Quantum *magick_restrict p; register ssize_t i; ssize_t center, j; j=(ssize_t) ceil((double) width*(1.0-QuantumScale* GetPixelIntensity(edge_image,r))-0.5); if (j < 0) j=0; else if (j > (ssize_t) width) j=(ssize_t) width; if ((j & 0x01) != 0) j--; p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y- (ssize_t) ((width-j)/2L),width-j,width-j,exception); if (p == (const Quantum *) NULL) break; center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+ GetPixelChannels(image)*((width-j)/2); for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const double *magick_restrict k; register const Quantum *magick_restrict pixels; register ssize_t u; ssize_t v; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if (((blur_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p+center) <= (QuantumRange/2))) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } k=kernel[j]; pixels=p; pixel=0.0; gamma=0.0; if ((blur_traits & BlendPixelTrait) == 0) { /* No alpha blending. */ for (v=0; v < (ssize_t) (width-j); v++) { for (u=0; u < (ssize_t) (width-j); u++) { pixel+=(*k)*pixels[i]; gamma+=(*k); k++; pixels+=GetPixelChannels(image); } } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); continue; } /* Alpha blending. 
*/ for (v=0; v < (ssize_t) (width-j); v++) { for (u=0; u < (ssize_t) (width-j); u++) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=(*k)*alpha*pixels[i]; gamma+=(*k)*alpha; k++; pixels+=GetPixelChannels(image); } } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } q+=GetPixelChannels(blur_image); r+=GetPixelChannels(edge_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_AdaptiveBlurImage) #endif proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); edge_view=DestroyCacheView(edge_view); image_view=DestroyCacheView(image_view); edge_image=DestroyImage(edge_image); for (i=0; i < (ssize_t) width; i+=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e S h a r p e n I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveSharpenImage() adaptively sharpens the image by sharpening more % intensely near image edges and less intensely far from edges. We sharpen the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you. % % The format of the AdaptiveSharpenImage method is: % % Image *AdaptiveSharpenImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Laplacian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { #define AdaptiveSharpenImageTag "Convolve/Image" #define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma) CacheView *sharp_view, *edge_view, *image_view; double normalize, **kernel; Image *sharp_image, *edge_image, *gaussian_image; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; size_t width; ssize_t j, k, u, v, y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); sharp_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (sharp_image == (Image *) NULL) return((Image *) NULL); if (fabs(sigma) < MagickEpsilon) return(sharp_image); if (SetImageStorageClass(sharp_image,DirectClass,exception) == MagickFalse) { sharp_image=DestroyImage(sharp_image); return((Image *) NULL); } /* Edge detect the image brightness channel, level, sharp, and level again. 
*/ edge_image=EdgeImage(image,radius,exception); if (edge_image == (Image *) NULL) { sharp_image=DestroyImage(sharp_image); return((Image *) NULL); } (void) AutoLevelImage(edge_image,exception); gaussian_image=BlurImage(edge_image,radius,sigma,exception); if (gaussian_image != (Image *) NULL) { edge_image=DestroyImage(edge_image); edge_image=gaussian_image; } (void) AutoLevelImage(edge_image,exception); /* Create a set of kernels from maximum (radius,sigma) to minimum. */ width=GetOptimalKernelWidth2D(radius,sigma); kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,sizeof(*kernel))); if (kernel == (double **) NULL) { edge_image=DestroyImage(edge_image); sharp_image=DestroyImage(sharp_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel)); for (i=0; i < (ssize_t) width; i+=2) { kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t) (width-i),(width-i)*sizeof(**kernel))); if (kernel[i] == (double *) NULL) break; normalize=0.0; j=(ssize_t) (width-i-1)/2; k=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); normalize+=kernel[i][k]; k++; } } kernel[i][(k-1)/2]=(double) ((-2.0)*normalize); if (sigma < MagickEpsilon) kernel[i][(k-1)/2]=1.0; } if (i < (ssize_t) width) { for (i-=2; i >= 0; i-=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); edge_image=DestroyImage(edge_image); sharp_image=DestroyImage(sharp_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Adaptively sharpen image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); edge_view=AcquireVirtualCacheView(edge_image,exception); sharp_view=AcquireAuthenticCacheView(sharp_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,sharp_image,sharp_image->rows,1) #endif for (y=0; y < (ssize_t) sharp_image->rows; y++) { register const Quantum *magick_restrict r; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1, exception); if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) sharp_image->columns; x++) { register const Quantum *magick_restrict p; register ssize_t i; ssize_t center, j; j=(ssize_t) ceil((double) width*(1.0-QuantumScale* GetPixelIntensity(edge_image,r))-0.5); if (j < 0) j=0; else if (j > (ssize_t) width) j=(ssize_t) width; if ((j & 0x01) != 0) j--; p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y- (ssize_t) ((width-j)/2L),width-j,width-j,exception); if (p == (const Quantum *) NULL) break; center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+ GetPixelChannels(image)*((width-j)/2); for (i=0; i < (ssize_t) GetPixelChannels(sharp_image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait sharp_traits, traits; register const double *magick_restrict k; register const Quantum *magick_restrict pixels; register ssize_t u; ssize_t v; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); sharp_traits=GetPixelChannelTraits(sharp_image,channel); 
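/* Channels without defined traits in either image are skipped, and copy-only
   or write-masked channels are copied through from the source pixel without
   being convolved. */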
if ((traits == UndefinedPixelTrait) || (sharp_traits == UndefinedPixelTrait)) continue; if (((sharp_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p+center) <= (QuantumRange/2))) { SetPixelChannel(sharp_image,channel,p[center+i],q); continue; } k=kernel[j]; pixels=p; pixel=0.0; gamma=0.0; if ((sharp_traits & BlendPixelTrait) == 0) { /* No alpha blending. */ for (v=0; v < (ssize_t) (width-j); v++) { for (u=0; u < (ssize_t) (width-j); u++) { pixel+=(*k)*pixels[i]; gamma+=(*k); k++; pixels+=GetPixelChannels(image); } } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q); continue; } /* Alpha blending. */ for (v=0; v < (ssize_t) (width-j); v++) { for (u=0; u < (ssize_t) (width-j); u++) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=(*k)*alpha*pixels[i]; gamma+=(*k)*alpha; k++; pixels+=GetPixelChannels(image); } } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q); } q+=GetPixelChannels(sharp_image); r+=GetPixelChannels(edge_image); } if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_AdaptiveSharpenImage) #endif proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } sharp_image->type=image->type; sharp_view=DestroyCacheView(sharp_view); edge_view=DestroyCacheView(edge_view); image_view=DestroyCacheView(image_view); edge_image=DestroyImage(edge_image); for (i=0; i < (ssize_t) width; i+=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); if (status == MagickFalse) sharp_image=DestroyImage(sharp_image); return(sharp_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BlurImage() blurs an image. We convolve the image with a Gaussian operator % of the given radius and standard deviation (sigma). For reasonable results, % the radius should be larger than sigma. Use a radius of 0 and BlurImage() % selects a suitable radius for you. % % The format of the BlurImage method is: % % Image *BlurImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. 
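%
%  As the implementation below shows, BlurImage() builds a pair of 1-D "blur"
%  kernels (the second rotated 90 degrees) and hands them to ConvolveImage().
%  An illustrative call, with the radius chosen automatically from sigma:
%
%    blur_image=BlurImage(image,0.0,1.5,exception);
%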
% */ MagickExport Image *BlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { char geometry[MagickPathExtent]; KernelInfo *kernel_info; Image *blur_image; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateBlurImage(image,radius,sigma,exception); if (blur_image != (Image *) NULL) return(blur_image); #endif (void) FormatLocaleString(geometry,MagickPathExtent, "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma); kernel_info=AcquireKernelInfo(geometry,exception); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); blur_image=ConvolveImage(image,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n v o l v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvolveImage() applies a custom convolution kernel to the image. % % The format of the ConvolveImage method is: % % Image *ConvolveImage(const Image *image,const KernelInfo *kernel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o kernel: the filtering kernel. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ConvolveImage(const Image *image, const KernelInfo *kernel_info,ExceptionInfo *exception) { Image *convolve_image; #if defined(MAGICKCORE_OPENCL_SUPPORT) convolve_image=AccelerateConvolveImage(image,kernel_info,exception); if (convolve_image != (Image *) NULL) return(convolve_image); #endif convolve_image=MorphologyImage(image,ConvolveMorphology,1,kernel_info, exception); return(convolve_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s p e c k l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DespeckleImage() reduces the speckle noise in an image while perserving the % edges of the original image. A speckle removing filter uses a complementary % hulling technique (raising pixels that are darker than their surrounding % neighbors, then complementarily lowering pixels that are brighter than their % surrounding neighbors) to reduce the speckle index of that image (reference % Crimmins speckle removal). % % The format of the DespeckleImage method is: % % Image *DespeckleImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
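%
%  An illustrative call; the filter takes no tuning parameters and can be
%  applied repeatedly for stronger smoothing:
%
%    despeckle_image=DespeckleImage(image,exception);
%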
% */ static void Hull(const Image *image,const ssize_t x_offset, const ssize_t y_offset,const size_t columns,const size_t rows, const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g) { register Quantum *p, *q, *r, *s; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(f != (Quantum *) NULL); assert(g != (Quantum *) NULL); p=f+(columns+2); q=g+(columns+2); r=p+(y_offset*(columns+2)+x_offset); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (y=0; y < (ssize_t) rows; y++) { MagickRealType v; register ssize_t i, x; i=(2*y+1)+y*columns; if (polarity > 0) for (x=0; x < (ssize_t) columns; x++) { v=(MagickRealType) p[i]; if ((MagickRealType) r[i] >= (v+ScaleCharToQuantum(2))) v+=ScaleCharToQuantum(1); q[i]=(Quantum) v; i++; } else for (x=0; x < (ssize_t) columns; x++) { v=(MagickRealType) p[i]; if ((MagickRealType) r[i] <= (v-ScaleCharToQuantum(2))) v-=ScaleCharToQuantum(1); q[i]=(Quantum) v; i++; } } p=f+(columns+2); q=g+(columns+2); r=q+(y_offset*(columns+2)+x_offset); s=q-(y_offset*(columns+2)+x_offset); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (y=0; y < (ssize_t) rows; y++) { register ssize_t i, x; MagickRealType v; i=(2*y+1)+y*columns; if (polarity > 0) for (x=0; x < (ssize_t) columns; x++) { v=(MagickRealType) q[i]; if (((MagickRealType) s[i] >= (v+ScaleCharToQuantum(2))) && ((MagickRealType) r[i] > v)) v+=ScaleCharToQuantum(1); p[i]=(Quantum) v; i++; } else for (x=0; x < (ssize_t) columns; x++) { v=(MagickRealType) q[i]; if (((MagickRealType) s[i] <= (v-ScaleCharToQuantum(2))) && ((MagickRealType) r[i] < v)) v-=ScaleCharToQuantum(1); p[i]=(Quantum) v; i++; } } } MagickExport Image *DespeckleImage(const Image *image,ExceptionInfo *exception) { #define DespeckleImageTag "Despeckle/Image" CacheView *despeckle_view, *image_view; Image *despeckle_image; MagickBooleanType status; MemoryInfo *buffer_info, *pixel_info; Quantum *magick_restrict buffer, *magick_restrict pixels; register ssize_t i; size_t length; static const ssize_t X[4] = {0, 1, 1,-1}, Y[4] = {1, 0, 1, 1}; /* Allocate despeckled image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) despeckle_image=AccelerateDespeckleImage(image,exception); if (despeckle_image != (Image *) NULL) return(despeckle_image); #endif despeckle_image=CloneImage(image,0,0,MagickTrue,exception); if (despeckle_image == (Image *) NULL) return((Image *) NULL); status=SetImageStorageClass(despeckle_image,DirectClass,exception); if (status == MagickFalse) { despeckle_image=DestroyImage(despeckle_image); return((Image *) NULL); } /* Allocate image buffer. 
*/ length=(size_t) ((image->columns+2)*(image->rows+2)); pixel_info=AcquireVirtualMemory(length,sizeof(*pixels)); buffer_info=AcquireVirtualMemory(length,sizeof(*buffer)); if ((pixel_info == (MemoryInfo *) NULL) || (buffer_info == (MemoryInfo *) NULL)) { if (buffer_info != (MemoryInfo *) NULL) buffer_info=RelinquishVirtualMemory(buffer_info); if (pixel_info != (MemoryInfo *) NULL) pixel_info=RelinquishVirtualMemory(pixel_info); despeckle_image=DestroyImage(despeckle_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info); buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info); /* Reduce speckle in the image. */ status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel; PixelTrait despeckle_traits, traits; register ssize_t k, x; ssize_t j, y; if (status == MagickFalse) continue; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); despeckle_traits=GetPixelChannelTraits(despeckle_image,channel); if ((traits == UndefinedPixelTrait) || (despeckle_traits == UndefinedPixelTrait)) continue; if ((despeckle_traits & CopyPixelTrait) != 0) continue; (void) ResetMagickMemory(pixels,0,length*sizeof(*pixels)); j=(ssize_t) image->columns+2; for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } j++; for (x=0; x < (ssize_t) image->columns; x++) { pixels[j++]=p[i]; p+=GetPixelChannels(image); } j++; } (void) ResetMagickMemory(buffer,0,length*sizeof(*buffer)); for (k=0; k < 4; k++) { Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer); Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer); Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer); Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer); } j=(ssize_t) image->columns+2; for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns, 1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } j++; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelChannel(despeckle_image,channel,pixels[j++],q); q+=GetPixelChannels(despeckle_image); } sync=SyncCacheViewAuthenticPixels(despeckle_view,exception); if (sync == MagickFalse) status=MagickFalse; j++; } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i, GetPixelChannels(image)); if (proceed == MagickFalse) status=MagickFalse; } } despeckle_view=DestroyCacheView(despeckle_view); image_view=DestroyCacheView(image_view); buffer_info=RelinquishVirtualMemory(buffer_info); pixel_info=RelinquishVirtualMemory(pixel_info); despeckle_image->type=image->type; if (status == MagickFalse) despeckle_image=DestroyImage(despeckle_image); return(despeckle_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E d g e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EdgeImage() finds edges in an image. Radius defines the radius of the % convolution filter. 
Use a radius of 0 and EdgeImage() selects a suitable % radius for you. % % The format of the EdgeImage method is: % % Image *EdgeImage(const Image *image,const double radius, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *EdgeImage(const Image *image,const double radius, ExceptionInfo *exception) { Image *edge_image; KernelInfo *kernel_info; register ssize_t i; size_t width; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth1D(radius,0.5); kernel_info=AcquireKernelInfo((const char *) NULL,exception); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); (void) ResetMagickMemory(kernel_info,0,sizeof(*kernel_info)); kernel_info->width=width; kernel_info->height=width; kernel_info->x=(ssize_t) (kernel_info->width-1)/2; kernel_info->y=(ssize_t) (kernel_info->height-1)/2; kernel_info->signature=MagickCoreSignature; kernel_info->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel_info->width,kernel_info->height* sizeof(*kernel_info->values))); if (kernel_info->values == (MagickRealType *) NULL) { kernel_info=DestroyKernelInfo(kernel_info); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++) kernel_info->values[i]=(-1.0); kernel_info->values[i/2]=(double) kernel_info->width*kernel_info->height-1.0; edge_image=ConvolveImage(image,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(edge_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E m b o s s I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EmbossImage() returns a grayscale image with a three-dimensional effect. % We convolve the image with a Gaussian operator of the given radius and % standard deviation (sigma). For reasonable results, radius should be % larger than sigma. Use a radius of 0 and Emboss() selects a suitable % radius for you. % % The format of the EmbossImage method is: % % Image *EmbossImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. 
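%
%  An illustrative call (the values are placeholders); note that the result
%  is histogram-equalized before it is returned:
%
%    emboss_image=EmbossImage(image,0.0,1.0,exception);
%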
% */ MagickExport Image *EmbossImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { double gamma, normalize; Image *emboss_image; KernelInfo *kernel_info; register ssize_t i; size_t width; ssize_t j, k, u, v; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth1D(radius,sigma); kernel_info=AcquireKernelInfo((const char *) NULL,exception); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); kernel_info->width=width; kernel_info->height=width; kernel_info->x=(ssize_t) (width-1)/2; kernel_info->y=(ssize_t) (width-1)/2; kernel_info->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel_info->width,kernel_info->width* sizeof(*kernel_info->values))); if (kernel_info->values == (MagickRealType *) NULL) { kernel_info=DestroyKernelInfo(kernel_info); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } j=(ssize_t) (kernel_info->width-1)/2; k=j; i=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel_info->values[i]=(MagickRealType) (((u < 0) || (v < 0) ? -8.0 : 8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/ (2.0*MagickPI*MagickSigma*MagickSigma)); if (u != k) kernel_info->values[i]=0.0; i++; } k--; } normalize=0.0; for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++) normalize+=kernel_info->values[i]; gamma=PerceptibleReciprocal(normalize); for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++) kernel_info->values[i]*=gamma; emboss_image=ConvolveImage(image,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); if (emboss_image != (Image *) NULL) (void) EqualizeImage(emboss_image,exception); return(emboss_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G a u s s i a n B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GaussianBlurImage() blurs an image. We convolve the image with a % Gaussian operator of the given radius and standard deviation (sigma). % For reasonable results, the radius should be larger than sigma. Use a % radius of 0 and GaussianBlurImage() selects a suitable radius for you % % The format of the GaussianBlurImage method is: % % Image *GaussianBlurImage(const Image *image,onst double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. 
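%
%  Unlike BlurImage(), this method convolves with a single 2-D "gaussian"
%  kernel, as the implementation below shows.  An illustrative call:
%
%    blur_image=GaussianBlurImage(image,0.0,2.0,exception);
%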
% */ MagickExport Image *GaussianBlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { char geometry[MagickPathExtent]; KernelInfo *kernel_info; Image *blur_image; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g", radius,sigma); kernel_info=AcquireKernelInfo(geometry,exception); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); blur_image=ConvolveImage(image,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % K u w a h a r a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % KuwaharaImage() is an edge preserving noise reduction filter. % % The format of the KuwaharaImage method is: % % Image *KuwaharaImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the square window radius. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. % */ static inline MagickRealType GetMeanLuma(const Image *magick_restrict image, const double *magick_restrict pixel) { return(0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+ 0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+ 0.072186f*pixel[image->channel_map[BluePixelChannel].offset]); /* Rec709 */ } MagickExport Image *KuwaharaImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { #define KuwaharaImageTag "Kuwahara/Image" CacheView *image_view, *kuwahara_view; Image *gaussian_image, *kuwahara_image; MagickBooleanType status; MagickOffsetType progress; size_t width; ssize_t y; /* Initialize Kuwahara image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=(size_t) radius+1; gaussian_image=BlurImage(image,radius,sigma,exception); if (gaussian_image == (Image *) NULL) return((Image *) NULL); kuwahara_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (kuwahara_image == (Image *) NULL) { gaussian_image=DestroyImage(gaussian_image); return((Image *) NULL); } if (SetImageStorageClass(kuwahara_image,DirectClass,exception) == MagickFalse) { gaussian_image=DestroyImage(gaussian_image); kuwahara_image=DestroyImage(kuwahara_image); return((Image *) NULL); } /* Edge preserving noise reduction filter. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(gaussian_image,exception); kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,kuwahara_image,image->rows,1) #endif for (y=0; y < (ssize_t) gaussian_image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) gaussian_image->columns; x++) { const Quantum *magick_restrict p; double min_variance; RectangleInfo quadrant, target; register size_t i; min_variance=MagickMaximumValue; SetGeometry(gaussian_image,&target); quadrant.width=width; quadrant.height=width; for (i=0; i < 4; i++) { const Quantum *magick_restrict k; double mean[MaxPixelChannels], variance; register ssize_t n; ssize_t j; quadrant.x=x; quadrant.y=y; switch (i) { case 0: { quadrant.x=x-(ssize_t) (width-1); quadrant.y=y-(ssize_t) (width-1); break; } case 1: { quadrant.y=y-(ssize_t) (width-1); break; } case 2: { quadrant.x=x-(ssize_t) (width-1); break; } case 3: default: break; } p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y, quadrant.width,quadrant.height,exception); if (p == (const Quantum *) NULL) break; for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++) mean[j]=0.0; k=p; for (n=0; n < (ssize_t) (width*width); n++) { for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++) mean[j]+=(double) k[j]; k+=GetPixelChannels(gaussian_image); } for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++) mean[j]/=(double) (width*width); k=p; variance=0.0; for (n=0; n < (ssize_t) (width*width); n++) { double luma; luma=GetPixelLuma(gaussian_image,k); variance+=(luma-GetMeanLuma(gaussian_image,mean))* (luma-GetMeanLuma(gaussian_image,mean)); k+=GetPixelChannels(gaussian_image); } if (variance < min_variance) { min_variance=variance; target=quadrant; } } if (i < 4) { status=MagickFalse; break; } status=InterpolatePixelChannels(gaussian_image,image_view,kuwahara_image, UndefinedInterpolatePixel,(double) target.x+target.width/2.0,(double) target.y+target.height/2.0,q,exception); q+=GetPixelChannels(kuwahara_image); } if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_KuwaharaImage) #endif proceed=SetImageProgress(image,KuwaharaImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } kuwahara_view=DestroyCacheView(kuwahara_view); image_view=DestroyCacheView(image_view); gaussian_image=DestroyImage(gaussian_image); if (status == MagickFalse) kuwahara_image=DestroyImage(kuwahara_image); return(kuwahara_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L o c a l C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LocalContrastImage() attempts to increase the appearance of large-scale % light-dark transitions. Local contrast enhancement works similarly to % sharpening with an unsharp mask, however the mask is instead created using % an image with a greater blur distance. 
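%
%  An illustrative call (values are not prescriptive); both arguments are
%  percentages, per the parameter descriptions that follow:
%
%    contrast_image=LocalContrastImage(image,20.0,40.0,exception);
%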
% % The format of the LocalContrastImage method is: % % Image *LocalContrastImage(const Image *image, const double radius, % const double strength,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian blur, in percentage with 100% % resulting in a blur radius of 20% of largest dimension. % % o strength: the strength of the blur mask in percentage. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *LocalContrastImage(const Image *image,const double radius, const double strength,ExceptionInfo *exception) { #define LocalContrastImageTag "LocalContrast/Image" CacheView *image_view, *contrast_view; float *interImage, *scanLinePixels, totalWeight; Image *contrast_image; MagickBooleanType status; MemoryInfo *scanLinePixels_info, *interImage_info; ssize_t scanLineSize, width; /* Initialize contrast image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception); if (contrast_image != (Image *) NULL) return(contrast_image); #endif contrast_image=CloneImage(image,0,0,MagickTrue,exception); if (contrast_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(contrast_image,DirectClass,exception) == MagickFalse) { contrast_image=DestroyImage(contrast_image); return((Image *) NULL); } image_view=AcquireVirtualCacheView(image,exception); contrast_view=AcquireAuthenticCacheView(contrast_image,exception); scanLineSize=(ssize_t) MagickMax(image->columns,image->rows); width=(ssize_t) scanLineSize*0.002f*fabs(radius); scanLineSize+=(2*width); scanLinePixels_info=AcquireVirtualMemory((size_t) GetOpenMPMaximumThreads()* scanLineSize,sizeof(*scanLinePixels)); if (scanLinePixels_info == (MemoryInfo *) NULL) { contrast_view=DestroyCacheView(contrast_view); image_view=DestroyCacheView(image_view); contrast_image=DestroyImage(contrast_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } scanLinePixels=(float *) GetVirtualMemoryBlob(scanLinePixels_info); /* Create intermediate buffer. */ interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)), sizeof(*interImage)); if (interImage_info == (MemoryInfo *) NULL) { scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info); contrast_view=DestroyCacheView(contrast_view); image_view=DestroyCacheView(image_view); contrast_image=DestroyImage(contrast_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } interImage=(float *) GetVirtualMemoryBlob(interImage_info); totalWeight=(float) ((width+1)*(width+1)); /* Vertical pass. 
*/ status=MagickTrue; { ssize_t x; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,image->columns,1) #endif for (x=0; x < (ssize_t) image->columns; x++) { const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; float *out, *pix, *pixels; register ssize_t y; ssize_t i; if (status == MagickFalse) continue; pixels=scanLinePixels; pixels+=id*scanLineSize; pix=pixels; p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width), exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (y=0; y < (ssize_t) image->rows+(2*width); y++) { *pix++=(float)GetPixelLuma(image,p); p+=image->number_channels; } out=interImage+x+width; for (y=0; y < (ssize_t) image->rows; y++) { float sum, weight; weight=1.0f; sum=0; pix=pixels+y; for (i=0; i < width; i++) { sum+=weight*(*pix++); weight+=1.0f; } for (i=width+1; i < (2*width); i++) { sum+=weight*(*pix++); weight-=1.0f; } /* write to output */ *out=sum/totalWeight; /* mirror into padding */ if (x <= width && x != 0) *(out-(x*2))=*out; if ((x > (ssize_t) image->columns-width-2) && (x != (ssize_t) image->columns-1)) *(out+((image->columns-x-1)*2))=*out; out+=image->columns+(width*2); } } } /* Horizontal pass. */ { ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; float *pix, *pixels; register Quantum *magick_restrict q; register ssize_t x; ssize_t i; if (status == MagickFalse) continue; pixels=scanLinePixels; pixels+=id*scanLineSize; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+ (2*width))*sizeof(float)); for (x=0; x < (ssize_t) image->columns; x++) { float mult, srcVal, sum, weight; weight=1.0f; sum=0; pix=pixels+x; for (i=0; i < width; i++) { sum+=weight*(*pix++); weight+=1.0f; } for (i=width+1; i < (2*width); i++) { sum+=weight*(*pix++); weight-=1.0f; } /* Apply and write */ srcVal=(float) GetPixelLuma(image,p); mult=(srcVal-(sum/totalWeight))*(strength/100.0f); mult=(srcVal+mult)/srcVal; SetPixelRed(contrast_image,ClampToQuantum(GetPixelRed(image,p)*mult), q); SetPixelGreen(contrast_image,ClampToQuantum(GetPixelGreen(image,p)* mult),q); SetPixelBlue(contrast_image,ClampToQuantum(GetPixelBlue(image,p)*mult), q); p+=image->number_channels; q+=contrast_image->number_channels; } if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse) status=MagickFalse; } } scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info); interImage_info=RelinquishVirtualMemory(interImage_info); contrast_view=DestroyCacheView(contrast_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) contrast_image=DestroyImage(contrast_image); return(contrast_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o t i o n B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MotionBlurImage() simulates motion blur. We convolve the image with a % Gaussian operator of the given radius and standard deviation (sigma). 
% For reasonable results, radius should be larger than sigma. Use a % radius of 0 and MotionBlurImage() selects a suitable radius for you. % Angle gives the angle of the blurring motion. % % Andrew Protano contributed this effect. % % The format of the MotionBlurImage method is: % % Image *MotionBlurImage(const Image *image,const double radius, % const double sigma,const double angle,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting % the center pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o angle: Apply the effect along this angle. % % o exception: return any errors or warnings in this structure. % */ static MagickRealType *GetMotionBlurKernel(const size_t width, const double sigma) { MagickRealType *kernel, normalize; register ssize_t i; /* Generate a 1-D convolution kernel. */ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,sizeof(*kernel))); if (kernel == (MagickRealType *) NULL) return(kernel); normalize=0.0; for (i=0; i < (ssize_t) width; i++) { kernel[i]=(MagickRealType) (exp((-((double) i*i)/(double) (2.0*MagickSigma* MagickSigma)))/(MagickSQ2PI*MagickSigma)); normalize+=kernel[i]; } for (i=0; i < (ssize_t) width; i++) kernel[i]/=normalize; return(kernel); } MagickExport Image *MotionBlurImage(const Image *image,const double radius, const double sigma,const double angle,ExceptionInfo *exception) { #define BlurImageTag "Blur/Image" CacheView *blur_view, *image_view, *motion_view; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; MagickRealType *kernel; OffsetInfo *offset; PointInfo point; register ssize_t i; size_t width; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); width=GetOptimalKernelWidth1D(radius,sigma); kernel=GetMotionBlurKernel(width,sigma); if (kernel == (MagickRealType *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset)); if (offset == (OffsetInfo *) NULL) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } point.x=(double) width*sin(DegreesToRadians(angle)); point.y=(double) width*cos(DegreesToRadians(angle)); for (i=0; i < (ssize_t) width; i++) { offset[i].x=(ssize_t) ceil((double) (i*point.y)/hypot(point.x,point.y)-0.5); offset[i].y=(ssize_t) ceil((double) (i*point.x)/hypot(point.x,point.y)-0.5); } /* Motion blur image. 
*/ #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateMotionBlurImage(image,kernel,width,offset,exception); if (blur_image != (Image *) NULL) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); return(blur_image); } #endif blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (blur_image == (Image *) NULL) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); return((Image *) NULL); } if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); blur_image=DestroyImage(blur_image); return((Image *) NULL); } status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); motion_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const Quantum *magick_restrict r; register MagickRealType *magick_restrict k; register ssize_t j; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if (((blur_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p) <= (QuantumRange/2))) { SetPixelChannel(blur_image,channel,p[i],q); continue; } k=kernel; pixel=0.0; if ((blur_traits & BlendPixelTrait) == 0) { for (j=0; j < (ssize_t) width; j++) { r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+ offset[j].y,1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } pixel+=(*k)*r[i]; k++; } SetPixelChannel(blur_image,channel,ClampToQuantum(pixel),q); continue; } alpha=0.0; gamma=0.0; for (j=0; j < (ssize_t) width; j++) { r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+offset[j].y,1, 1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } alpha=(double) (QuantumScale*GetPixelAlpha(image,r)); pixel+=(*k)*alpha*r[i]; gamma+=(*k)*alpha; k++; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(blur_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_MotionBlurImage) #endif proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } 
blur_view=DestroyCacheView(blur_view); motion_view=DestroyCacheView(motion_view); image_view=DestroyCacheView(image_view); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P r e v i e w I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PreviewImage() tiles 9 thumbnails of the specified image with an image % processing operation applied with varying parameters. This may be helpful % pin-pointing an appropriate parameter for a particular image processing % operation. % % The format of the PreviewImages method is: % % Image *PreviewImages(const Image *image,const PreviewType preview, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o preview: the image processing operation. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *PreviewImage(const Image *image,const PreviewType preview, ExceptionInfo *exception) { #define NumberTiles 9 #define PreviewImageTag "Preview/Image" #define DefaultPreviewGeometry "204x204+10+10" char factor[MagickPathExtent], label[MagickPathExtent]; double degrees, gamma, percentage, radius, sigma, threshold; extern const char DefaultTileFrame[]; Image *images, *montage_image, *preview_image, *thumbnail; ImageInfo *preview_info; MagickBooleanType proceed; MontageInfo *montage_info; QuantizeInfo quantize_info; RectangleInfo geometry; register ssize_t i, x; size_t colors; ssize_t y; /* Open output image file. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); colors=2; degrees=0.0; gamma=(-0.2f); preview_info=AcquireImageInfo(); SetGeometry(image,&geometry); (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y, &geometry.width,&geometry.height); images=NewImageList(); percentage=12.5; GetQuantizeInfo(&quantize_info); radius=0.0; sigma=1.0; threshold=0.0; x=0; y=0; for (i=0; i < NumberTiles; i++) { thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception); if (thumbnail == (Image *) NULL) break; (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL, (void *) NULL); (void) SetImageProperty(thumbnail,"label",DefaultTileLabel,exception); if (i == (NumberTiles/2)) { (void) QueryColorCompliance("#dfdfdf",AllCompliance, &thumbnail->matte_color,exception); AppendImageToList(&images,thumbnail); continue; } switch (preview) { case RotatePreview: { degrees+=45.0; preview_image=RotateImage(thumbnail,degrees,exception); (void) FormatLocaleString(label,MagickPathExtent,"rotate %g",degrees); break; } case ShearPreview: { degrees+=5.0; preview_image=ShearImage(thumbnail,degrees,degrees,exception); (void) FormatLocaleString(label,MagickPathExtent,"shear %gx%g",degrees, 2.0*degrees); break; } case RollPreview: { x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles; y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles; preview_image=RollImage(thumbnail,x,y,exception); (void) FormatLocaleString(label,MagickPathExtent,"roll %+.20gx%+.20g", (double) x,(double) y); break; } case HuePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) 
FormatLocaleString(factor,MagickPathExtent,"100,100,%g",2.0* percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } case SaturationPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MagickPathExtent,"100,%g",2.0* percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } case BrightnessPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MagickPathExtent,"%g",2.0*percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } case GammaPreview: default: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; gamma+=0.4f; (void) GammaImage(preview_image,gamma,exception); (void) FormatLocaleString(label,MagickPathExtent,"gamma %g",gamma); break; } case SpiffPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image != (Image *) NULL) for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickTrue,exception); (void) FormatLocaleString(label,MagickPathExtent,"contrast (%.20g)", (double) i+1); break; } case DullPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickFalse,exception); (void) FormatLocaleString(label,MagickPathExtent,"+contrast (%.20g)", (double) i+1); break; } case GrayscalePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; quantize_info.number_colors=colors; quantize_info.colorspace=GRAYColorspace; (void) QuantizeImage(&quantize_info,preview_image,exception); (void) FormatLocaleString(label,MagickPathExtent, "-colorspace gray -colors %.20g",(double) colors); break; } case QuantizePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; quantize_info.number_colors=colors; (void) QuantizeImage(&quantize_info,preview_image,exception); (void) FormatLocaleString(label,MagickPathExtent,"colors %.20g", (double) colors); break; } case DespecklePreview: { for (x=0; x < (i-1); x++) { preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; thumbnail=DestroyImage(thumbnail); thumbnail=preview_image; } preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(label,MagickPathExtent,"despeckle (%.20g)", (double) i+1); break; } case ReduceNoisePreview: { preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) radius,(size_t) radius,exception); (void) FormatLocaleString(label,MagickPathExtent,"noise %g",radius); break; } case AddNoisePreview: { switch ((int) i) { case 0: { (void) CopyMagickString(factor,"uniform",MagickPathExtent); break; } case 1: { (void) CopyMagickString(factor,"gaussian",MagickPathExtent); break; } case 2: { (void) CopyMagickString(factor,"multiplicative",MagickPathExtent); break; } case 3: { (void) CopyMagickString(factor,"impulse",MagickPathExtent); break; } case 5: { (void) CopyMagickString(factor,"laplacian",MagickPathExtent); break; } case 6: { (void) 
CopyMagickString(factor,"Poisson",MagickPathExtent); break; } default: { (void) CopyMagickString(thumbnail->magick,"NULL",MagickPathExtent); break; } } preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i, (size_t) i,exception); (void) FormatLocaleString(label,MagickPathExtent,"+noise %s",factor); break; } case SharpenPreview: { preview_image=SharpenImage(thumbnail,radius,sigma,exception); (void) FormatLocaleString(label,MagickPathExtent,"sharpen %gx%g", radius,sigma); break; } case BlurPreview: { preview_image=BlurImage(thumbnail,radius,sigma,exception); (void) FormatLocaleString(label,MagickPathExtent,"blur %gx%g",radius, sigma); break; } case ThresholdPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) BilevelImage(thumbnail,(double) (percentage*((double) QuantumRange+1.0))/100.0,exception); (void) FormatLocaleString(label,MagickPathExtent,"threshold %g", (double) (percentage*((double) QuantumRange+1.0))/100.0); break; } case EdgeDetectPreview: { preview_image=EdgeImage(thumbnail,radius,exception); (void) FormatLocaleString(label,MagickPathExtent,"edge %g",radius); break; } case SpreadPreview: { preview_image=SpreadImage(thumbnail,image->interpolate,radius, exception); (void) FormatLocaleString(label,MagickPathExtent,"spread %g", radius+0.5); break; } case SolarizePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) SolarizeImage(preview_image,(double) QuantumRange*percentage/ 100.0,exception); (void) FormatLocaleString(label,MagickPathExtent,"solarize %g", (QuantumRange*percentage)/100.0); break; } case ShadePreview: { degrees+=10.0; preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees, exception); (void) FormatLocaleString(label,MagickPathExtent,"shade %gx%g",degrees, degrees); break; } case RaisePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; geometry.width=(size_t) (2*i+2); geometry.height=(size_t) (2*i+2); geometry.x=(i-1)/2; geometry.y=(i-1)/2; (void) RaiseImage(preview_image,&geometry,MagickTrue,exception); (void) FormatLocaleString(label,MagickPathExtent, "raise %.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double) geometry.height,(double) geometry.x,(double) geometry.y); break; } case SegmentPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; threshold+=0.4f; (void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold, threshold,exception); (void) FormatLocaleString(label,MagickPathExtent,"segment %gx%g", threshold,threshold); break; } case SwirlPreview: { preview_image=SwirlImage(thumbnail,degrees,image->interpolate, exception); (void) FormatLocaleString(label,MagickPathExtent,"swirl %g",degrees); degrees+=45.0; break; } case ImplodePreview: { degrees+=0.1f; preview_image=ImplodeImage(thumbnail,degrees,image->interpolate, exception); (void) FormatLocaleString(label,MagickPathExtent,"implode %g",degrees); break; } case WavePreview: { degrees+=5.0f; preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees, image->interpolate,exception); (void) FormatLocaleString(label,MagickPathExtent,"wave %gx%g",0.5* degrees,2.0*degrees); break; } case OilPaintPreview: { preview_image=OilPaintImage(thumbnail,(double) radius,(double) sigma, exception); (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g", radius,sigma); break; } case CharcoalDrawingPreview: { 
preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma, exception); (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g", radius,sigma); break; } case JPEGPreview: { char filename[MagickPathExtent]; int file; MagickBooleanType status; preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; preview_info->quality=(size_t) percentage; (void) FormatLocaleString(factor,MagickPathExtent,"%.20g",(double) preview_info->quality); file=AcquireUniqueFileResource(filename); if (file != -1) file=close(file)-1; (void) FormatLocaleString(preview_image->filename,MagickPathExtent, "jpeg:%s",filename); status=WriteImage(preview_info,preview_image,exception); if (status != MagickFalse) { Image *quality_image; (void) CopyMagickString(preview_info->filename, preview_image->filename,MagickPathExtent); quality_image=ReadImage(preview_info,exception); if (quality_image != (Image *) NULL) { preview_image=DestroyImage(preview_image); preview_image=quality_image; } } (void) RelinquishUniqueFileResource(preview_image->filename); if ((GetBlobSize(preview_image)/1024) >= 1024) (void) FormatLocaleString(label,MagickPathExtent,"quality %s\n%gmb ", factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/ 1024.0/1024.0); else if (GetBlobSize(preview_image) >= 1024) (void) FormatLocaleString(label,MagickPathExtent, "quality %s\n%gkb ",factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/1024.0); else (void) FormatLocaleString(label,MagickPathExtent, "quality %s\n%.20gb ",factor,(double) ((MagickOffsetType) GetBlobSize(thumbnail))); break; } } thumbnail=DestroyImage(thumbnail); percentage+=12.5; radius+=0.5; sigma+=0.25; if (preview_image == (Image *) NULL) break; (void) DeleteImageProperty(preview_image,"label"); (void) SetImageProperty(preview_image,"label",label,exception); AppendImageToList(&images,preview_image); proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i, NumberTiles); if (proceed == MagickFalse) break; } if (images == (Image *) NULL) { preview_info=DestroyImageInfo(preview_info); return((Image *) NULL); } /* Create the montage. */ montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL); (void) CopyMagickString(montage_info->filename,image->filename, MagickPathExtent); montage_info->shadow=MagickTrue; (void) CloneString(&montage_info->tile,"3x3"); (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry); (void) CloneString(&montage_info->frame,DefaultTileFrame); montage_image=MontageImages(images,montage_info,exception); montage_info=DestroyMontageInfo(montage_info); images=DestroyImageList(images); if (montage_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); if (montage_image->montage != (char *) NULL) { /* Free image directory. */ montage_image->montage=(char *) RelinquishMagickMemory( montage_image->montage); if (image->directory != (char *) NULL) montage_image->directory=(char *) RelinquishMagickMemory( montage_image->directory); } preview_info=DestroyImageInfo(preview_info); return(montage_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R o t a t i o n a l B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RotationalBlurImage() applies a radial blur to the image. % % Andrew Protano contributed this effect. 
% % The format of the RotationalBlurImage method is: % % Image *RotationalBlurImage(const Image *image,const double angle, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o angle: the angle of the radial blur. % % o blur: the blur. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *RotationalBlurImage(const Image *image,const double angle, ExceptionInfo *exception) { CacheView *blur_view, *image_view, *radial_view; double blur_radius, *cos_theta, offset, *sin_theta, theta; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; PointInfo blur_center; register ssize_t i; size_t n; ssize_t y; /* Allocate blur image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateRotationalBlurImage(image,angle,exception); if (blur_image != (Image *) NULL) return(blur_image); #endif blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } blur_center.x=(double) (image->columns-1)/2.0; blur_center.y=(double) (image->rows-1)/2.0; blur_radius=hypot(blur_center.x,blur_center.y); n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL); theta=DegreesToRadians(angle)/(double) (n-1); cos_theta=(double *) AcquireQuantumMemory((size_t) n, sizeof(*cos_theta)); sin_theta=(double *) AcquireQuantumMemory((size_t) n, sizeof(*sin_theta)); if ((cos_theta == (double *) NULL) || (sin_theta == (double *) NULL)) { blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } offset=theta*(double) (n-1)/2.0; for (i=0; i < (ssize_t) n; i++) { cos_theta[i]=cos((double) (theta*i-offset)); sin_theta[i]=sin((double) (theta*i-offset)); } /* Radial blur image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); radial_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double radius; PointInfo center; register ssize_t i; size_t step; center.x=(double) x-blur_center.x; center.y=(double) y-blur_center.y; radius=hypot((double) center.x,center.y); if (radius == 0) step=1; else { step=(size_t) (blur_radius/radius); if (step == 0) step=1; else if (step >= n) step=n-1; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const Quantum *magick_restrict r; register ssize_t j; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if (((blur_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p) <= (QuantumRange/2))) { SetPixelChannel(blur_image,channel,p[i],q); continue; } gamma=0.0; pixel=0.0; if ((GetPixelChannelTraits(image,AlphaPixelChannel) == UndefinedPixelTrait) || (channel == AlphaPixelChannel)) { for (j=0; j < (ssize_t) n; j+=(ssize_t) step) { r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+ center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t) (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5), 1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } pixel+=r[i]; gamma++; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); continue; } for (j=0; j < (ssize_t) n; j+=(ssize_t) step) { double alpha; r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+ center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t) (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5), 1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } alpha=(double) QuantumScale*GetPixelAlpha(image,r); pixel+=alpha*r[i]; gamma+=alpha; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(blur_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RotationalBlurImage) #endif proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_view=DestroyCacheView(blur_view); radial_view=DestroyCacheView(radial_view); image_view=DestroyCacheView(image_view); cos_theta=(double *) RelinquishMagickMemory(cos_theta); sin_theta=(double *) RelinquishMagickMemory(sin_theta); if (status == 
MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e l e c t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SelectiveBlurImage() selectively blur pixels within a contrast threshold. % It is similar to the unsharpen mask that sharpens everything with contrast % above a certain threshold. % % The format of the SelectiveBlurImage method is: % % Image *SelectiveBlurImage(const Image *image,const double radius, % const double sigma,const double threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o threshold: only pixels within this contrast threshold are included % in the blur operation. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SelectiveBlurImage(const Image *image,const double radius, const double sigma,const double threshold,ExceptionInfo *exception) { #define SelectiveBlurImageTag "SelectiveBlur/Image" CacheView *blur_view, *image_view, *luminance_view; Image *blur_image, *luminance_image; MagickBooleanType status; MagickOffsetType progress; MagickRealType *kernel; register ssize_t i; size_t width; ssize_t center, j, u, v, y; /* Initialize blur image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth1D(radius,sigma); kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,width*sizeof(*kernel))); if (kernel == (MagickRealType *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); j=(ssize_t) (width-1)/2; i=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) kernel[i++]=(MagickRealType) (exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); } if (image->debug != MagickFalse) { char format[MagickPathExtent], *message; register const MagickRealType *k; ssize_t u, v; (void) LogMagickEvent(TransformEvent,GetMagickModule(), " SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double) width); message=AcquireString(""); k=kernel; for (v=0; v < (ssize_t) width; v++) { *message='\0'; (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v); (void) ConcatenateString(&message,format); for (u=0; u < (ssize_t) width; u++) { (void) FormatLocaleString(format,MagickPathExtent,"%+f ",(double) *k++); (void) ConcatenateString(&message,format); } (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message); } message=DestroyString(message); } blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); return((Image *) NULL); } luminance_image=CloneImage(image,0,0,MagickTrue,exception); if (luminance_image == (Image *) NULL) { blur_image=DestroyImage(blur_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); 
return((Image *) NULL); } status=TransformImageColorspace(luminance_image,GRAYColorspace,exception); if (status == MagickFalse) { luminance_image=DestroyImage(luminance_image); blur_image=DestroyImage(blur_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); return((Image *) NULL); } /* Threshold blur image. */ status=MagickTrue; progress=0; center=(ssize_t) (GetPixelChannels(image)*(image->columns+width)* ((width-1)/2L)+GetPixelChannels(image)*((width-1)/2L)); image_view=AcquireVirtualCacheView(image,exception); luminance_view=AcquireVirtualCacheView(luminance_image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double contrast; MagickBooleanType sync; register const Quantum *magick_restrict l, *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t) ((width-1)/2L),image->columns+width,width,exception); l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y- (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (l == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double intensity; register ssize_t i; intensity=GetPixelIntensity(image,p+center); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict luminance_pixels, *magick_restrict pixels; register ssize_t u; ssize_t v; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if (((blur_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p+center) <= (QuantumRange/2))) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } k=kernel; pixel=0.0; pixels=p; luminance_pixels=l; gamma=0.0; if ((blur_traits & BlendPixelTrait) == 0) { for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(luminance_image,luminance_pixels)- intensity; if (fabs(contrast) < threshold) { pixel+=(*k)*pixels[i]; gamma+=(*k); } k++; pixels+=GetPixelChannels(image); luminance_pixels+=GetPixelChannels(luminance_image); } pixels+=GetPixelChannels(image)*image->columns; luminance_pixels+=GetPixelChannels(luminance_image)* luminance_image->columns; } if (fabs((double) gamma) < MagickEpsilon) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); continue; } for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(image,pixels)-intensity; if (fabs(contrast) < threshold) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=(*k)*alpha*pixels[i]; gamma+=(*k)*alpha; } k++; pixels+=GetPixelChannels(image); luminance_pixels+=GetPixelChannels(luminance_image); } 
pixels+=GetPixelChannels(image)*image->columns; luminance_pixels+=GetPixelChannels(luminance_image)* luminance_image->columns; } if (fabs((double) gamma) < MagickEpsilon) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); l+=GetPixelChannels(luminance_image); q+=GetPixelChannels(blur_image); } sync=SyncCacheViewAuthenticPixels(blur_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SelectiveBlurImage) #endif proceed=SetImageProgress(image,SelectiveBlurImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); image_view=DestroyCacheView(image_view); luminance_image=DestroyImage(luminance_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a d e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShadeImage() shines a distant light on an image to create a % three-dimensional effect. You control the positioning of the light with % azimuth and elevation; azimuth is measured in degrees off the x axis % and elevation is measured in pixels above the Z axis. % % The format of the ShadeImage method is: % % Image *ShadeImage(const Image *image,const MagickBooleanType gray, % const double azimuth,const double elevation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o gray: A value other than zero shades the intensity of each pixel. % % o azimuth, elevation: Define the light source direction. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray, const double azimuth,const double elevation,ExceptionInfo *exception) { #define ShadeImageTag "Shade/Image" CacheView *image_view, *shade_view; Image *linear_image, *shade_image; MagickBooleanType status; MagickOffsetType progress; PrimaryInfo light; ssize_t y; /* Initialize shaded image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); linear_image=CloneImage(image,0,0,MagickTrue,exception); shade_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL)) { if (linear_image != (Image *) NULL) linear_image=DestroyImage(linear_image); if (shade_image != (Image *) NULL) shade_image=DestroyImage(shade_image); return((Image *) NULL); } if (SetImageStorageClass(shade_image,DirectClass,exception) == MagickFalse) { linear_image=DestroyImage(linear_image); shade_image=DestroyImage(shade_image); return((Image *) NULL); } /* Compute the light vector. 
*/ light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))* cos(DegreesToRadians(elevation)); light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))* cos(DegreesToRadians(elevation)); light.z=(double) QuantumRange*sin(DegreesToRadians(elevation)); /* Shade image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(linear_image,exception); shade_view=AcquireAuthenticCacheView(shade_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(linear_image,shade_image,linear_image->rows,1) #endif for (y=0; y < (ssize_t) linear_image->rows; y++) { double distance, normal_distance, shade; PrimaryInfo normal; register const Quantum *magick_restrict center, *magick_restrict p, *magick_restrict post, *magick_restrict pre; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3, exception); q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } /* Shade this row of pixels. */ normal.z=2.0*(double) QuantumRange; /* constant Z of surface normal */ for (x=0; x < (ssize_t) linear_image->columns; x++) { register ssize_t i; /* Determine the surface normal and compute shading. */ pre=p+GetPixelChannels(linear_image); center=pre+(linear_image->columns+2)*GetPixelChannels(linear_image); post=center+(linear_image->columns+2)*GetPixelChannels(linear_image); normal.x=(double) ( GetPixelIntensity(linear_image,pre-GetPixelChannels(linear_image))+ GetPixelIntensity(linear_image,center-GetPixelChannels(linear_image))+ GetPixelIntensity(linear_image,post-GetPixelChannels(linear_image))- GetPixelIntensity(linear_image,pre+GetPixelChannels(linear_image))- GetPixelIntensity(linear_image,center+GetPixelChannels(linear_image))- GetPixelIntensity(linear_image,post+GetPixelChannels(linear_image))); normal.y=(double) ( GetPixelIntensity(linear_image,post-GetPixelChannels(linear_image))+ GetPixelIntensity(linear_image,post)+ GetPixelIntensity(linear_image,post+GetPixelChannels(linear_image))- GetPixelIntensity(linear_image,pre-GetPixelChannels(linear_image))- GetPixelIntensity(linear_image,pre)- GetPixelIntensity(linear_image,pre+GetPixelChannels(linear_image))); if ((fabs(normal.x) <= MagickEpsilon) && (fabs(normal.y) <= MagickEpsilon)) shade=light.z; else { shade=0.0; distance=normal.x*light.x+normal.y*light.y+normal.z*light.z; if (distance > MagickEpsilon) { normal_distance=normal.x*normal.x+normal.y*normal.y+ normal.z*normal.z; if (normal_distance > (MagickEpsilon*MagickEpsilon)) shade=distance/sqrt((double) normal_distance); } } for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++) { PixelChannel channel; PixelTrait shade_traits, traits; channel=GetPixelChannelChannel(linear_image,i); traits=GetPixelChannelTraits(linear_image,channel); shade_traits=GetPixelChannelTraits(shade_image,channel); if ((traits == UndefinedPixelTrait) || (shade_traits == UndefinedPixelTrait)) continue; if (((shade_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(linear_image,center) <= (QuantumRange/2))) { SetPixelChannel(shade_image,channel,center[i],q); continue; } if ((traits & UpdatePixelTrait) == 0) { SetPixelChannel(shade_image,channel,center[i],q); continue; } if (gray != MagickFalse) { SetPixelChannel(shade_image,channel,ClampToQuantum(shade),q); continue; } 
SetPixelChannel(shade_image,channel,ClampToQuantum(QuantumScale*shade* center[i]),q); } p+=GetPixelChannels(linear_image); q+=GetPixelChannels(shade_image); } if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ShadeImage) #endif proceed=SetImageProgress(image,ShadeImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } shade_view=DestroyCacheView(shade_view); image_view=DestroyCacheView(image_view); linear_image=DestroyImage(linear_image); if (status == MagickFalse) shade_image=DestroyImage(shade_image); return(shade_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a r p e n I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SharpenImage() sharpens the image. We convolve the image with a Gaussian % operator of the given radius and standard deviation (sigma). For % reasonable results, radius should be larger than sigma. Use a radius of 0 % and SharpenImage() selects a suitable radius for you. % % Using a separable kernel would be faster, but the negative weights cancel % out on the corners of the kernel producing often undesirable ringing in the % filtered result; this can be avoided by using a 2D gaussian shaped image % sharpening kernel instead. % % The format of the SharpenImage method is: % % Image *SharpenImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Laplacian, in pixels. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *SharpenImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { double gamma, normalize; Image *sharp_image; KernelInfo *kernel_info; register ssize_t i; size_t width; ssize_t j, u, v; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth2D(radius,sigma); kernel_info=AcquireKernelInfo((const char *) NULL,exception); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); (void) ResetMagickMemory(kernel_info,0,sizeof(*kernel_info)); kernel_info->width=width; kernel_info->height=width; kernel_info->x=(ssize_t) (width-1)/2; kernel_info->y=(ssize_t) (width-1)/2; kernel_info->signature=MagickCoreSignature; kernel_info->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel_info->width,kernel_info->height* sizeof(*kernel_info->values))); if (kernel_info->values == (MagickRealType *) NULL) { kernel_info=DestroyKernelInfo(kernel_info); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } normalize=0.0; j=(ssize_t) (kernel_info->width-1)/2; i=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel_info->values[i]=(MagickRealType) (-exp(-((double) u*u+v*v)/(2.0* MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); normalize+=kernel_info->values[i]; i++; } } kernel_info->values[i/2]=(double) ((-2.0)*normalize); normalize=0.0; for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++) normalize+=kernel_info->values[i]; gamma=PerceptibleReciprocal(normalize); for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++) kernel_info->values[i]*=gamma; sharp_image=ConvolveImage(image,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(sharp_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S p r e a d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SpreadImage() is a special effects method that randomly displaces each % pixel in a square area defined by the radius parameter. % % The format of the SpreadImage method is: % % Image *SpreadImage(const Image *image, % const PixelInterpolateMethod method,const double radius, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o method: intepolation method. % % o radius: choose a random pixel in a neighborhood of this extent. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SpreadImage(const Image *image, const PixelInterpolateMethod method,const double radius, ExceptionInfo *exception) { #define SpreadImageTag "Spread/Image" CacheView *image_view, *spread_view; Image *spread_image; MagickBooleanType status; MagickOffsetType progress; RandomInfo **magick_restrict random_info; size_t width; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif /* Initialize spread image attributes. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); spread_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (spread_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(spread_image,DirectClass,exception) == MagickFalse) { spread_image=DestroyImage(spread_image); return((Image *) NULL); } /* Spread image. */ status=MagickTrue; progress=0; width=GetOptimalKernelWidth1D(radius,0.5); random_info=AcquireRandomInfoThreadSet(); image_view=AcquireVirtualCacheView(image,exception); spread_view=AcquireAuthenticCacheView(spread_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,spread_image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { PointInfo point; point.x=GetPseudoRandomValue(random_info[id]); point.y=GetPseudoRandomValue(random_info[id]); status=InterpolatePixelChannels(image,image_view,spread_image,method, (double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),q, exception); q+=GetPixelChannels(spread_image); } if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SpreadImage) #endif proceed=SetImageProgress(image,SpreadImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } spread_view=DestroyCacheView(spread_view); image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) spread_image=DestroyImage(spread_image); return(spread_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n s h a r p M a s k I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnsharpMaskImage() sharpens one or more image channels. We convolve the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and UnsharpMaskImage() selects a suitable radius for you. % % The format of the UnsharpMaskImage method is: % % Image *UnsharpMaskImage(const Image *image,const double radius, % const double sigma,const double amount,const double threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o gain: the percentage of the difference between the original and the % blur image that is added back into the original. % % o threshold: the threshold in pixels needed to apply the diffence gain. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *UnsharpMaskImage(const Image *image,const double radius, const double sigma,const double gain,const double threshold, ExceptionInfo *exception) { #define SharpenImageTag "Sharpen/Image" CacheView *image_view, *unsharp_view; Image *unsharp_image; MagickBooleanType status; MagickOffsetType progress; double quantum_threshold; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); #if defined(MAGICKCORE_OPENCL_SUPPORT) unsharp_image=AccelerateUnsharpMaskImage(image,radius,sigma,gain,threshold, exception); if (unsharp_image != (Image *) NULL) return(unsharp_image); #endif unsharp_image=BlurImage(image,radius,sigma,exception); if (unsharp_image == (Image *) NULL) return((Image *) NULL); quantum_threshold=(double) QuantumRange*threshold; /* Unsharp-mask image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,unsharp_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double pixel; PixelChannel channel; PixelTrait traits, unsharp_traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); unsharp_traits=GetPixelChannelTraits(unsharp_image,channel); if ((traits == UndefinedPixelTrait) || (unsharp_traits == UndefinedPixelTrait)) continue; if (((unsharp_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p) <= (QuantumRange/2))) { SetPixelChannel(unsharp_image,channel,p[i],q); continue; } pixel=p[i]-(double) GetPixelChannel(unsharp_image,channel,q); if (fabs(2.0*pixel) < quantum_threshold) pixel=(double) p[i]; else pixel=(double) p[i]+gain*pixel; SetPixelChannel(unsharp_image,channel,ClampToQuantum(pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(unsharp_image); } if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_UnsharpMaskImage) #endif proceed=SetImageProgress(image,SharpenImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } unsharp_image->type=image->type; unsharp_view=DestroyCacheView(unsharp_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) unsharp_image=DestroyImage(unsharp_image); return(unsharp_image); }
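/*
  Added sketch (not part of MagickCore): the per-channel rule applied inside
  UnsharpMaskImage() above reduces to a small scalar update -- take the
  difference between the original value and its Gaussian-blurred counterpart
  and add gain times that difference back, but only when twice its magnitude
  reaches the threshold (expressed in quantum units).  The helper below
  restates that rule with plain doubles; the function name is hypothetical
  and exists only to illustrate the arithmetic.
*/
static inline double UnsharpMaskChannelSketch(const double original,
  const double blurred,const double gain,const double quantum_threshold)
{
  double
    difference;

  difference=original-blurred;
  if (fabs(2.0*difference) < quantum_threshold)
    return(original);                /* below threshold: keep original value */
  return(original+gain*difference);  /* otherwise boost the local contrast */
}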
eavlCombinedTopologyMapOp.h
// Copyright 2010-2014 UT-Battelle, LLC. See LICENSE.txt for more information. #ifndef EAVL_COMBINED_TOPOLOGY_MAP_OP_H #define EAVL_COMBINED_TOPOLOGY_MAP_OP_H #include "eavlCUDA.h" #include "eavlCellSet.h" #include "eavlCellSetExplicit.h" #include "eavlCellSetAllStructured.h" #include "eavlDataSet.h" #include "eavlArray.h" #include "eavlOpDispatch.h" #include "eavlOperation.h" #include "eavlTopology.h" #include "eavlException.h" #include <time.h> #ifdef HAVE_OPENMP #include <omp.h> #endif #ifndef DOXYGEN template <class CONN> struct eavlCombinedTopologyMapOp_CPU { static inline eavlArray::Location location() { return eavlArray::HOST; } template <class F, class IN0, class IN1, class OUT> static void call(int nitems, CONN &conn, const IN0 s_inputs, const IN1 d_inputs, OUT outputs, F &functor) { int ids[MAX_LOCAL_TOPOLOGY_IDS]; #pragma omp parallel for private(ids) for (int index = 0; index < nitems; ++index) { int nids; int shapeType = conn.GetElementComponents(index, nids, ids); typename collecttype<IN1>::const_type in_d(collect(index, d_inputs)); typename collecttype<OUT>::type out(collect(index, outputs)); out = functor(shapeType, nids, ids, s_inputs, in_d); } } }; #if defined __CUDACC__ template <class F, class IN0, class IN1, class OUT> __global__ void eavlCombinedTopologyMapOp_kernel(int nitems, CONN &conn, const IN0 s_inputs, const IN1 d_inputs, OUT outputs, F functor) { const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; int ids[MAX_LOCAL_TOPOLOGY_IDS]; for (int index = threadID; index < nitems; index += numThreads) { int nids; int shapeType = conn.GetElementComponents(index, nids, ids); collect(index, outputs) = functor(shapeType, nids, ids, s_inputs, collect(index, d_inputs)); } } template <class CONN> struct eavlCombinedTopologyMapOp_GPU { static inline eavlArray::Location location() { return eavlArray::DEVICE; } template <class F, class IN0, class IN1, class OUT> static void call(int nitems, CONN &conn, const IN0 s_inputs, const IN1 d_inputs, OUT outputs, F &functor) { int numThreads = 256; dim3 threads(numThreads, 1, 1); dim3 blocks (32, 1, 1); eavlCombinedTopologyMapOp_kernel<<< blocks, threads >>>(nitems, conn, s_inputs, d_inputs, outputs, functor); CUDA_CHECK_ERROR(); } }; #endif #endif // **************************************************************************** // Class: eavlCombinedTopologyMapOp // // Purpose: /// Map from one topological element in a mesh to another, with /// input arrays on both the source and destination topology /// and with outputs on the destination topology. 
// // Programmer: Jeremy Meredith // Creation: August 1, 2013 // // Modifications: // **************************************************************************** template <class IS, class ID, class O, class F> class eavlCombinedTopologyMapOp : public eavlOperation { protected: eavlCellSet *cells; eavlTopology topology; IS s_inputs; ID d_inputs; O outputs; F functor; public: eavlCombinedTopologyMapOp(eavlCellSet *c, eavlTopology t, IS is, ID id, O o, F f) : cells(c), topology(t), s_inputs(is), d_inputs(id), outputs(o), functor(f) { } virtual void GoCPU() { eavlCellSetExplicit *elExp = dynamic_cast<eavlCellSetExplicit*>(cells); eavlCellSetAllStructured *elStr = dynamic_cast<eavlCellSetAllStructured*>(cells); int n = outputs.first.length(); if (elExp) { eavlExplicitConnectivity &conn = elExp->GetConnectivity(topology); eavlOpDispatch<eavlCombinedTopologyMapOp_CPU<eavlExplicitConnectivity> >(n, conn, s_inputs, d_inputs, outputs, functor); } else if (elStr) { eavlRegularConnectivity conn = eavlRegularConnectivity(elStr->GetRegularStructure(),topology); eavlOpDispatch<eavlCombinedTopologyMapOp_CPU<eavlRegularConnectivity> >(n, conn, s_inputs, d_inputs, outputs, functor); } } virtual void GoGPU() { #ifdef HAVE_CUDA eavlCellSetExplicit *elExp = dynamic_cast<eavlCellSetExplicit*>(cells); eavlCellSetAllStructured *elStr = dynamic_cast<eavlCellSetAllStructured*>(cells); int n = outputs.first.length(); if (elExp) { eavlExplicitConnectivity &conn = elExp->GetConnectivity(topology); conn.shapetype.NeedOnDevice(); conn.connectivity.NeedOnDevice(); conn.mapCellToIndex.NeedOnDevice(); eavlOpDispatch<eavlCombinedTopologyMapOp_GPU<eavlExplicitConnectivity> >(n, conn, s_inputs, d_inputs, outputs, functor); conn.shapetype.NeedOnHost(); conn.connectivity.NeedOnHost(); conn.mapCellToIndex.NeedOnHost(); } else if (elStr) { eavlRegularConnectivity conn = eavlRegularConnectivity(elStr->GetRegularStructure(),topology); eavlOpDispatch<eavlCombinedTopologyMapOp_GPU<eavlRegularConnectivity> >(n, conn, s_inputs, d_inputs, outputs, functor); } #else THROW(eavlException,"Executing GPU code without compiling under CUDA compiler."); #endif } }; // helper function for type deduction template <class IS, class ID, class O, class F> eavlCombinedTopologyMapOp<IS,ID,O,F> *new_eavlCombinedTopologyMapOp(eavlCellSet *c, eavlTopology t, IS is, ID id, O o, F f) { return new eavlCombinedTopologyMapOp<IS,ID,O,F>(c,t,is,id,o,f); } #endif
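// ****************************************************************************
// Usage sketch (added commentary, illustrative only).  The cell set, field
// arrays, and functor named below are hypothetical stand-ins; only the
// construction pattern -- build the op with new_eavlCombinedTopologyMapOp and
// hand it to the executor -- comes from this header.  The snippet is guarded
// with #if 0 so that including the header is unaffected.
// ****************************************************************************
#if 0
    eavlExecutor::AddOperation(
        new_eavlCombinedTopologyMapOp(
            dataset->GetCellSet(0),        // destination topology (cells)
            EAVL_NODES_OF_CELLS,           // walk the nodes of each cell
            eavlOpArgs(eavlIndexable<eavlFloatArray>(nodeField)),   // source-topology input
            eavlOpArgs(eavlIndexable<eavlFloatArray>(cellScale)),   // destination-topology input
            eavlOpArgs(eavlIndexable<eavlFloatArray>(cellResult)),  // destination-topology output
            MyCombinedFunctor()),          // functor(shape, nids, ids, srcIn, dstIn)
        "example combined topology map");
#endif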
jtmodel.c
/* Copyright 2013-2016. The Regents of the University of California. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: * 2014-2016 Jonathan Tamir <jtamir@eecs.berkeley.edu> */ #include <string.h> #include <complex.h> #include <assert.h> #include <math.h> #include <stdbool.h> #ifdef USE_MKL #include <mkl.h> #endif #include "num/multind.h" #include "num/flpmath.h" #include "num/fft.h" #include "num/ops.h" #include "num/iovec.h" #include "linops/linop.h" #include "linops/someops.h" #include "misc/types.h" #include "misc/misc.h" #include "misc/mri.h" #include "misc/debug.h" #include "jtmodel.h" #ifdef USE_INTEL_KERNELS #include "t2sh_intel_kernels.h" #endif struct jtmodel_data { INTERFACE(linop_data_t); long cfksp_dims[DIMS]; long cfimg_dims[DIMS]; const struct linop_s* sense_op; const struct operator_s* stkern_op; complex float* cfksp; complex float* cfksp3; complex float* cfksp4; complex float* sens; long sens_dims[DIMS]; #ifdef USE_INTEL_KERNELS DFTI_DESCRIPTOR_HANDLE plan1d_0; DFTI_DESCRIPTOR_HANDLE plan1d_1; DFTI_DESCRIPTOR_HANDLE plan2d; #endif }; DEF_TYPEID(jtmodel_data); struct stkern_data { INTERFACE(operator_data_t); long fake_cfksp_dims[DIMS]; long cfksp_dims[DIMS]; long stkern_dims[DIMS]; const complex float* stkern_mat; const float* stkern_mat_trans; }; DEF_TYPEID(stkern_data); /** * Create T2Sh kernel, Psi = Phi^H P Phi, * where Phi is the basis and P is the sampling pattern */ extern void create_stkern_mat(complex float* stkern_mat, const long pat_dims[DIMS], const complex float* pat, const long bas_dims[DIMS], const complex float* bas) { #if 0 fmac mask bas tmp transpose 6 15 tmp tmp2 t2sh_proj -K4 tmp2 bas tmp3 transpose 5 6 tmp3 tmp4 transpose 6 15 tmp4 tmp5 #endif // ----------------------------------------------------------- // initialize dimensions and strides long max_dims[DIMS + 1]; // [X Y Z 1 1 T K A B C ... 1 1] long trp_dims[DIMS + 1]; // [X Y Z 1 1 T 1 A B C ... 1 K] long tproj_dims[DIMS + 1]; // [X Y Z 1 1 1 K A B C ... 1 K] long tproj2_dims[DIMS + 1]; // [X Y Z 1 1 K 1 A B C ... 1 K] long stkern_dims[DIMS + 1]; // [X Y Z 1 1 K K A B C ... 1 1] long fake_bas_dims[DIMS + 1]; // [1 1 1 1 1 T K 1 1 1 ... 1 1] long max_strs[DIMS]; long bas_strs[DIMS]; long pat_strs[DIMS]; for (unsigned int i = 0; i < DIMS; i++) { assert((pat_dims[i] == bas_dims[i]) || (1 == pat_dims[i]) || (1 == bas_dims[i])); max_dims[i] = (1 == pat_dims[i]) ? 
bas_dims[i] : pat_dims[i]; } max_dims[DIMS] = 1; // stick the COEFF_DIM into the extra dummy dimension, so that it doesn't get overwritten by the projection md_select_dims(DIMS + 1, ~COEFF_FLAG, trp_dims, max_dims); trp_dims[DIMS] = max_dims[COEFF_DIM]; // the t2sh_proj will squash the TE_DIM and set COEFF_DIM to K md_select_dims(DIMS + 1, ~TE_FLAG, tproj_dims, trp_dims); tproj_dims[COEFF_DIM] = bas_dims[COEFF_DIM]; // also want tproj with TE_DIM in the right place md_transpose_dims(DIMS + 1, TE_DIM, COEFF_DIM, tproj2_dims, tproj_dims); // final result is a symmetric matrix with possibly higher-level dims md_transpose_dims(DIMS + 1, COEFF_DIM, DIMS, stkern_dims, tproj2_dims); // same as basis, but with the extra dummy dimension md_copy_dims(DIMS, fake_bas_dims, bas_dims); fake_bas_dims[DIMS] = 1; md_calc_strides(DIMS, max_strs, max_dims, CFL_SIZE); md_calc_strides(DIMS, bas_strs, bas_dims, CFL_SIZE); md_calc_strides(DIMS, pat_strs, pat_dims, CFL_SIZE); // ----------------------------------------------------------- // fmac pattern basis tmp complex float* tmp = md_alloc_sameplace(DIMS + 1, max_dims, CFL_SIZE, bas); md_clear(DIMS, max_dims, tmp, CFL_SIZE); md_zfmac2(DIMS, max_dims, max_strs, tmp, bas_strs, bas, pat_strs, pat); // ----------------------------------------------------------- // transpose 6 16 tmp tmp2 // cannot do in place because there may be higher-level dims in use complex float* tmp2 = md_alloc_sameplace(DIMS + 1, trp_dims, CFL_SIZE, bas); md_transpose(DIMS + 1, COEFF_DIM, DIMS, trp_dims, tmp2, max_dims, tmp, CFL_SIZE); md_free(tmp); // ----------------------------------------------------------- // tproj tmp2 bas tmp3 complex float* tmp3 = md_alloc_sameplace(DIMS + 1, tproj_dims, CFL_SIZE, bas); md_zmatmulc(DIMS + 1, tproj_dims, tmp3, fake_bas_dims, bas, trp_dims, tmp2); md_free(tmp2); // ----------------------------------------------------------- // transpose 5 6 tmp3 tmp4 complex float* tmp4 = md_alloc_sameplace(DIMS + 1, tproj2_dims, CFL_SIZE, bas); md_transpose(DIMS + 1, TE_DIM, COEFF_DIM, tproj2_dims, tmp4, tproj_dims, tmp3, CFL_SIZE); md_free(tmp3); // ----------------------------------------------------------- // transpose 6 16 tmp4 stkern_mat md_transpose(DIMS + 1, COEFF_DIM, DIMS, stkern_dims, stkern_mat, tproj2_dims, tmp4, CFL_SIZE); md_free(tmp4); } static void stkern_apply(const operator_data_t* _data, unsigned int N, void* args[N]) { const struct stkern_data* data = CAST_DOWN(stkern_data, _data); long fake_cfksp_strs[DIMS]; long stkern_strs[DIMS]; long cfksp_strs[DIMS]; md_calc_strides(DIMS, fake_cfksp_strs, data->fake_cfksp_dims, CFL_SIZE); md_calc_strides(DIMS, stkern_strs, data->stkern_dims, CFL_SIZE); md_calc_strides(DIMS, cfksp_strs, data->cfksp_dims, CFL_SIZE); assert(2 == N); complex float* dst = args[0]; const complex float* src = args[1]; md_zmatmul2(DIMS, data->fake_cfksp_dims, fake_cfksp_strs, dst, data->stkern_dims, stkern_strs, data->stkern_mat, data->cfksp_dims, cfksp_strs, src); } static void stkern_del(const operator_data_t* _data) { const struct stkern_data* data = CAST_DOWN(stkern_data, _data); md_free((void*)data->stkern_mat); md_free((void*)data->stkern_mat_trans); xfree(data); } static const struct operator_s* stkern_init(const long pat_dims[DIMS], const complex float* pattern, const long bas_dims[DIMS], const complex float* basis, long stkern_dims[DIMS], const complex float* stkern_mat, long cfksp_dims[DIMS], bool use_gpu) { double start_time = timestamp(); PTR_ALLOC(struct stkern_data, data); SET_TYPEID(stkern_data, data); complex float* 
stkern_mat2 = md_alloc(DIMS, stkern_dims, CFL_SIZE); if (NULL != stkern_mat) md_copy(DIMS, stkern_dims, stkern_mat2, stkern_mat, CFL_SIZE); else { // FIXME this is very very slow on GPU create_stkern_mat(stkern_mat2, pat_dims, pattern, bas_dims, basis); #if 0 dump_cfl("stkern_mat", DIMS, stkern_dims, stkern_mat2); #endif } long dim0 = pat_dims[PHS1_DIM]; long dim1 = pat_dims[PHS2_DIM]; complex float* stkern_mat3 = md_alloc(DIMS, stkern_dims, CFL_SIZE); md_copy(DIMS, stkern_dims, stkern_mat3, stkern_mat2, CFL_SIZE); float* stkern_mat_trans = md_alloc(DIMS, stkern_dims, FL_SIZE); // Transpose 4x4 matrices and set to float int ncoeff = bas_dims[COEFF_DIM]; for(int img = 0 ; img < ncoeff*ncoeff ; img++) { complex float * nontrans = stkern_mat3 + img * dim0 * dim1; float * trans = stkern_mat_trans + img * dim0 * dim1; #pragma omp parallel for for(int i = 0 ; i < dim1 ; i++) { for(int j = 0 ; j < dim0 ; j++) { trans[i + j * dim1] = creal(nontrans[j + i * dim0]); } } } md_free(stkern_mat3); #ifdef USE_CUDA complex float* gpu_stkern_mat = NULL; float* gpu_stkern_mat_trans = NULL; if (use_gpu) { gpu_stkern_mat = md_gpu_move(DIMS, stkern_dims, stkern_mat2, CFL_SIZE); md_free(stkern_mat2); stkern_mat2 = gpu_stkern_mat; gpu_stkern_mat_trans = md_gpu_move(DIMS, stkern_dims, stkern_mat_trans, FL_SIZE); md_free(stkern_mat_trans); stkern_mat_trans = gpu_stkern_mat_trans; } #else assert(!use_gpu); #endif data->stkern_mat = stkern_mat2; data->stkern_mat_trans = stkern_mat_trans; md_copy_dims(DIMS, data->stkern_dims, stkern_dims); md_copy_dims(DIMS, data->cfksp_dims, cfksp_dims); long fake_cfksp_dims[DIMS]; md_select_dims(DIMS, ~COEFF_FLAG, fake_cfksp_dims, cfksp_dims); fake_cfksp_dims[TE_DIM] = cfksp_dims[COEFF_DIM]; md_copy_dims(DIMS, data->fake_cfksp_dims, fake_cfksp_dims); double end_time = timestamp(); debug_printf(DP_INFO, "stkern Time: %f\n", end_time - start_time); return operator_create(DIMS, cfksp_dims, DIMS, cfksp_dims, CAST_UP(PTR_PASS(data)), stkern_apply, stkern_del); } static void jtmodel_forward(const linop_data_t* _data, complex float* dst, const complex float* src) { UNUSED(src); UNUSED(dst); UNUSED(_data); error("TODO: implement compact forward op\n"); } static void jtmodel_adjoint(const linop_data_t* _data, complex float* dst, const complex float* src) { const struct jtmodel_data* data = CAST_DOWN(jtmodel_data, _data); #ifdef USE_INTEL_KERNELS const unsigned long ncoils = data->sens_dims[COIL_DIM]; const unsigned long nmaps = data->sens_dims[MAPS_DIM]; const unsigned long ncfimg = data->cfimg_dims[COEFF_DIM]; int dim0 = data->sens_dims[PHS1_DIM]; int dim1 = data->sens_dims[PHS2_DIM]; jtmodel_adjoint_benchmark_fast_parallel(data->sens, dst, src, dim0, dim1, ncoils, nmaps, ncfimg, data->plan2d, data->cfksp3); #else linop_adjoint_unchecked(data->sense_op, dst, src); #endif } #ifdef USE_INTEL_KERNELS static void jtmodel_intel_normal(const linop_data_t* _data, complex float* dst, const complex float* src) { const struct jtmodel_data* data = CAST_DOWN(jtmodel_data, _data); const struct operator_s* stkern_op = data->stkern_op; const struct stkern_data* sdata = CAST_DOWN(stkern_data, operator_get_data(stkern_op)); int dim0 = data->sens_dims[PHS1_DIM]; int dim1 = data->sens_dims[PHS2_DIM]; if (data->plan1d_0 == NULL) { debug_printf(DP_DEBUG1, "planning\n"); DftiCreateDescriptor(&(((struct jtmodel_data*)data)->plan1d_0), DFTI_SINGLE, DFTI_COMPLEX, 1, (MKL_LONG)dim0); DftiSetValue(((struct jtmodel_data*)data)->plan1d_0, DFTI_PLACEMENT, DFTI_INPLACE); DftiCommitDescriptor(((struct 
jtmodel_data*)data)->plan1d_0); DftiCreateDescriptor(&(((struct jtmodel_data*)data)->plan1d_1), DFTI_SINGLE, DFTI_COMPLEX, 1, (MKL_LONG)dim1); DftiSetValue(((struct jtmodel_data*)data)->plan1d_1, DFTI_PLACEMENT, DFTI_INPLACE); DftiCommitDescriptor(((struct jtmodel_data*)data)->plan1d_1);; } const unsigned long ncoils = data->sens_dims[COIL_DIM]; const unsigned long nmaps = data->sens_dims[MAPS_DIM]; const unsigned long nimg = data->cfimg_dims[COEFF_DIM]; jtmodel_normal_benchmark_fast_parallel(data->sens, sdata->stkern_mat_trans, dst, src, dim0, dim1, ncoils, nmaps, nimg, data->plan1d_0, data->plan1d_1, data->cfksp3, data->cfksp4); } #endif static void jtmodel_normal(const linop_data_t* _data, complex float* dst, const complex float* src) { const struct jtmodel_data* data = CAST_DOWN(jtmodel_data, _data); complex float* cfksp2 = md_alloc_sameplace(DIMS, data->cfksp_dims, CFL_SIZE, src); linop_forward_unchecked(data->sense_op, data->cfksp, src); operator_apply_unchecked(data->stkern_op, cfksp2, data->cfksp); linop_adjoint_unchecked(data->sense_op, dst, cfksp2); md_free(cfksp2); } static void jtmodel_del(const linop_data_t* _data) { const struct jtmodel_data* data = CAST_DOWN(jtmodel_data, _data); operator_free(data->stkern_op); md_free(data->cfksp); md_free(data->cfksp3); md_free(data->cfksp4); md_free(data->sens); //FIXME: free plan1d_0, plan1d_1 xfree(data); } /** * Create jtsense operator, y = P Phi F S a, * where P is the sampling operator, Phi is the basis, * F is the Fourier transform and S is the sensitivity maps * * @param max_dims maximal dimensions across all data structures * @param sense_op Fourier transform and sensitivity maps (F S) * @param temporal_op temporal operator (Phi) * @param sample_op sampling operator (P) */ struct linop_s* jtmodel_init(const long max_dims[DIMS], const struct linop_s* sense_op, const long pat_dims[DIMS], const complex float* pattern, const long bas_dims[DIMS], const complex float* basis, const complex float* stkern_mat, bool use_gpu) { PTR_ALLOC(struct jtmodel_data, data); SET_TYPEID(jtmodel_data, data); data->sense_op = sense_op; data->sens = NULL; md_singleton_dims(DIMS, data->sens_dims); // FIXME: make the select_dims take the inverse flags, so that it doesn't need to // change each time a new dim gets used md_select_dims(DIMS, (FFT_FLAGS | COIL_FLAG | COEFF_FLAG | CSHIFT_FLAG | TIME_FLAG), data->cfksp_dims, max_dims); #ifdef USE_CUDA data->cfksp = (use_gpu ? 
md_alloc_gpu : md_alloc)(DIMS, data->cfksp_dims, CFL_SIZE); #else assert(!use_gpu); data->cfksp = md_alloc(DIMS, data->cfksp_dims, CFL_SIZE); #endif long stkern_dims[DIMS]; md_select_dims(DIMS, (PHS1_FLAG | PHS2_FLAG | COEFF_FLAG | CSHIFT_FLAG | TIME_FLAG), stkern_dims, max_dims); stkern_dims[TE_DIM] = stkern_dims[COEFF_DIM]; debug_printf(DP_DEBUG3, "stkern_dims =\t"); debug_print_dims(DP_DEBUG3, DIMS, stkern_dims); const struct operator_s* stkern_op = stkern_init(pat_dims, pattern, bas_dims, basis, stkern_dims, stkern_mat, data->cfksp_dims, use_gpu); data->stkern_op = stkern_op; //(data)->cfksp3 = md_alloc(DIMS, data->cfksp_dims, CFL_SIZE); //(data)->cfksp4 = md_alloc(DIMS, data->cfksp_dims, CFL_SIZE); return linop_create(DIMS, data->cfksp_dims, linop_domain(sense_op)->N, linop_domain(sense_op)->dims, CAST_UP(data), jtmodel_forward, jtmodel_adjoint, jtmodel_normal, NULL, jtmodel_del); } #ifdef USE_INTEL_KERNELS struct linop_s* jtmodel_intel_init(const long max_dims[DIMS], const long cfimg_dims[DIMS], const struct linop_s* sense_op, const long sens_dims[DIMS], const complex float* sens, const long pat_dims[DIMS], const complex float* pattern, const long bas_dims[DIMS], const complex float* basis, const complex float* stkern_mat, bool use_gpu, DFTI_DESCRIPTOR_HANDLE plan1d_0, DFTI_DESCRIPTOR_HANDLE plan1d_1) { PTR_ALLOC(struct jtmodel_data, data); SET_TYPEID(jtmodel_data, data); data->sense_op = sense_op; md_copy_dims(DIMS, data->cfimg_dims, cfimg_dims); data->plan1d_0 = plan1d_0; data->plan1d_1 = plan1d_1; // FIXME: make the select_dims take the inverse flags, so that it doesn't need to // change each time a new dim gets used md_select_dims(DIMS, (FFT_FLAGS | COIL_FLAG | COEFF_FLAG | CSHIFT_FLAG | TIME_FLAG), data->cfksp_dims, max_dims); md_copy_dims(DIMS, data->sens_dims, sens_dims); #ifdef USE_CUDA data->cfksp = (use_gpu ? md_alloc_gpu : md_alloc)(DIMS, data->cfksp_dims, CFL_SIZE); data->sens = (use_gpu ? md_alloc_gpu : md_alloc)(DIMS, data->sens_dims, CFL_SIZE); #else assert(!use_gpu); data->cfksp = md_alloc(DIMS, data->cfksp_dims, CFL_SIZE); data->sens = md_alloc(DIMS, data->sens_dims, CFL_SIZE); #endif md_copy(DIMS, data->sens_dims, data->sens, sens, CFL_SIZE); long stkern_dims[DIMS]; md_select_dims(DIMS, (PHS1_FLAG | PHS2_FLAG | COEFF_FLAG | CSHIFT_FLAG | TIME_FLAG), stkern_dims, max_dims); stkern_dims[TE_DIM] = stkern_dims[COEFF_DIM]; debug_printf(DP_DEBUG3, "stkern_dims =\t"); debug_print_dims(DP_DEBUG3, DIMS, stkern_dims); const struct operator_s* stkern_op = stkern_init(pat_dims, pattern, bas_dims, basis, stkern_dims, stkern_mat, data->cfksp_dims, use_gpu); data->stkern_op = stkern_op; int dim0 = data->sens_dims[PHS1_DIM]; int dim1 = data->sens_dims[PHS2_DIM]; int sense_op_N = 16; MKL_LONG dims[2] = {dim1, dim0}; DftiCreateDescriptor(&((data)->plan2d), DFTI_SINGLE, DFTI_COMPLEX, 2, dims); DftiSetValue((data)->plan2d, DFTI_PLACEMENT, DFTI_NOT_INPLACE); DftiCommitDescriptor((data)->plan2d); (data)->cfksp3 = md_alloc(DIMS, data->cfksp_dims, CFL_SIZE); (data)->cfksp4 = md_alloc(DIMS, data->cfksp_dims, CFL_SIZE); return linop_create(DIMS, data->cfksp_dims, sense_op_N, data->cfimg_dims, CAST_UP(data), jtmodel_forward, jtmodel_adjoint, jtmodel_intel_normal, NULL, jtmodel_del); } #endif
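/*
 * Editor's note (illustrative sketch appended after the file above; the names
 * stkern_apply_toy, npos, K, kern are hypothetical and not part of BART or of
 * this file): stkern_apply() above uses md_zmatmul2() to contract the
 * precomputed space-time kernel with the coefficient k-space. As the
 * dimensions are set up, this amounts to applying a small K x K matrix
 * (K = number of temporal basis coefficients) to the K coefficient values at
 * each phase-encoding position, broadcast along the readout and coil
 * dimensions. The toy routine below shows that per-location contraction with
 * plain C arrays; it is a simplified stand-in, not the operator's actual code
 * path.
 */
#include <complex.h>
#include <stddef.h>

/* dst[p][k2] = sum_k1 kern[p][k2][k1] * src[p][k1] for each of the npos
 * k-space positions; kern is stored as npos row-major K x K blocks. */
static void stkern_apply_toy(size_t npos, size_t K, complex float *dst,
                             const complex float *kern, const complex float *src)
{
	for (size_t p = 0; p < npos; p++)
		for (size_t k2 = 0; k2 < K; k2++) {

			complex float acc = 0.f;

			for (size_t k1 = 0; k1 < K; k1++)
				acc += kern[(p * K + k2) * K + k1] * src[p * K + k1];

			dst[p * K + k2] = acc;
		}
}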
pado_unw_unv_para.201912291420.debug_wrong_label_size.h
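/*
 * Editor's note (illustrative sketch, not part of the header below): the
 * ParaVertexCentricPLL header fills its queues in two phases so that OpenMP
 * threads never contend on a shared end pointer: (1) an exclusive prefix sum
 * over per-vertex degrees gives every active vertex a private write window in
 * a temporary buffer; (2) a second prefix sum over the counts actually written
 * compacts the sparse buffer into the real queue (prefix_sum_for_offsets and
 * collect_into_queue in the header). The plain-C sketch below reproduces that
 * pattern with generic, hypothetical names; it is a simplified stand-in, not
 * the class's actual interface.
 */
#include <stdint.h>
#include <stdlib.h>

/* Exclusive prefix sum in place; returns the total. */
static uint32_t exclusive_prefix_sum(uint32_t *a, uint32_t n)
{
	uint32_t sum = 0;
	for (uint32_t i = 0; i < n; i++) {
		uint32_t t = a[i];
		a[i] = sum;
		sum += t;
	}
	return sum;
}

/* Each of n producers may emit up to capacity[i] items; queue is assumed
 * preallocated large enough (the header preallocates num_v slots). */
static void parallel_fill_and_collect(uint32_t n, const uint32_t *capacity,
                                      uint32_t *queue, uint32_t *end_queue)
{
	uint32_t *offsets = malloc(n * sizeof *offsets);
	uint32_t *sizes = calloc(n, sizeof *sizes);
	for (uint32_t i = 0; i < n; i++)
		offsets[i] = capacity[i];
	uint32_t total_cap = exclusive_prefix_sum(offsets, n);
	uint32_t *tmp = malloc(total_cap * sizeof *tmp);

	/* Phase 1: every producer appends into its own window, lock-free. */
	#pragma omp parallel for
	for (uint32_t i = 0; i < n; i++)
		for (uint32_t j = 0; j < capacity[i]; j++)
			if (0 == (j & 1))	/* dummy filter: keep even items only */
				tmp[offsets[i] + sizes[i]++] = j;

	/* Phase 2: compact the per-producer windows into the real queue. */
	uint32_t *dst = malloc(n * sizeof *dst);
	for (uint32_t i = 0; i < n; i++)
		dst[i] = sizes[i];
	uint32_t total_new = exclusive_prefix_sum(dst, n);

	#pragma omp parallel for
	for (uint32_t i = 0; i < n; i++)
		for (uint32_t j = 0; j < sizes[i]; j++)
			queue[*end_queue + dst[i] + j] = tmp[offsets[i] + j];
	*end_queue += total_new;

	free(offsets); free(sizes); free(tmp); free(dst);
}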
/* * pado.h * * Created on: Sep 4, 2018 * Author: Zhen Peng */ #ifndef INCLUDES_PADO_UNW_PARA_UNV_H_ #define INCLUDES_PADO_UNW_PARA_UNV_H_ #include <vector> #include <unordered_map> #include <map> #include <algorithm> #include <iostream> #include <limits.h> #include <xmmintrin.h> #include <bitset> #include <cmath> #include "globals.h" #include "graph.h" #include <omp.h> using std::vector; using std::unordered_map; using std::map; using std::bitset; using std::stable_sort; using std::min; using std::fill; namespace PADO { //inti NUM_THREADS = 4; //const inti BATCH_SIZE = 1024; // The size for regular batch and bit array. //const inti BITPARALLEL_SIZE = 50; //const inti THRESHOLD_PARALLEL = 80; //// Batch based processing, 09/11/2018 template<inti BATCH_SIZE = 1024> class ParaVertexCentricPLL { private: static const inti BITPARALLEL_SIZE = 50; idi num_v_ = 0; const inti THRESHOLD_PARALLEL = 80; // Structure for the type of label struct IndexType { struct Batch { idi batch_id; // Batch ID idi start_index; // Index to the array distances where the batch starts inti size; // Number of distances element in this batch Batch(idi batch_id_, idi start_index_, inti size_) : batch_id(batch_id_), start_index(start_index_), size(size_) { ; } }; struct DistanceIndexType { idi start_index; // Index to the array vertices where the same-ditance vertices start inti size; // Number of the same-distance vertices smalli dist; // The real distance DistanceIndexType(idi start_index_, inti size_, smalli dist_) : start_index(start_index_), size(size_), dist(dist_) { ; } }; smalli bp_dist[BITPARALLEL_SIZE]; uint64_t bp_sets[BITPARALLEL_SIZE][2]; // [0]: S^{-1}, [1]: S^{0} vector<Batch> batches; // Batch info vector<DistanceIndexType> distances; // Distance info vector<idi> vertices; // Vertices in the label, preresented as temperory ID }; //__attribute__((aligned(64))); // Structure for the type of temporary label struct ShortIndex { // I use BATCH_SIZE + 1 bit for indicator bit array. // The v.indicator[BATCH_SIZE] is set if in current batch v has got any new labels already. // In this way, when do initialization, only initialize those short_index[v] whose indicator[BATCH_SIZE] is set. bitset<BATCH_SIZE + 1> indicator; // Global indicator, indicator[r] (0 <= r < BATCH_SIZE) is set means root r once selected as candidate already // Use a queue to store candidates vector<inti> candidates_que = vector<inti>(BATCH_SIZE); inti end_candidates_que = 0; vector<uint8_t> is_candidate = vector<uint8_t>(BATCH_SIZE, 0); }; //__attribute__((aligned(64))); // Structure of the public ordered index for distance queries. 
struct IndexOrdered { weighti bp_dist[BITPARALLEL_SIZE]; uint64_t bp_sets[BITPARALLEL_SIZE][2]; // [0]: S^{-1}, [1]: S^{0} vector<idi> label_id; vector<weighti> label_dists; }; vector<IndexType> L; vector<IndexOrdered> Index; // Ordered labels for original vertex ID void construct(const Graph &G); inline void bit_parallel_labeling( const Graph &G, vector<IndexType> &L, vector<uint8_t> &used_bp_roots); // inline void bit_parallel_labeling( // const Graph &G, // vector<IndexType> &L, // vector<bool> &used_bp_roots); inline void batch_process( const Graph &G, idi b_id, idi roots_start, // start id of roots inti roots_size, // how many roots in the batch vector<IndexType> &L, const vector<uint8_t> &used_bp_roots, vector<idi> &active_queue, idi &end_active_queue, vector<idi> &candidate_queue, idi &end_candidate_queue, vector<ShortIndex> &short_index, vector<vector<smalli> > &dist_matrix, vector<uint8_t> &got_candidates, vector<uint8_t> &is_active, vector<idi> &once_candidated_queue, idi &end_once_candidated_queue, vector<uint8_t> &once_candidated); // inline void batch_process( // const Graph &G, // idi b_id, // idi root_start, // inti roots_size, // vector<IndexType> &L, // const vector<bool> &used_bp_roots); inline void initialize( vector<ShortIndex> &short_index, vector<vector<smalli> > &dist_matrix, vector<idi> &active_queue, idi &end_active_queue, vector<idi> &once_candidated_queue, idi &end_once_candidated_queue, // vector<bool> &once_candidated, vector<uint8_t> &once_candidated, idi b_id, idi roots_start, inti roots_size, vector<IndexType> &L, const vector<uint8_t> &used_bp_roots); inline void push_labels( idi v_head, idi roots_start, const Graph &G, const vector<IndexType> &L, vector<ShortIndex> &short_index, // vector<idi> &candidate_queue, // idi &end_candidate_queue, vector<idi> &tmp_candidate_queue, idi &size_tmp_candidate_queue, const idi offset_tmp_queue, // idi &offset_tmp_candidate_queue, // vector<bool> &got_candidates, vector<uint8_t> &got_candidates, vector<idi> &once_candidated_queue, idi &end_once_candidated_queue, // vector<bool> &once_candidated, vector<uint8_t> &once_candidated, const vector<uint8_t> &used_bp_roots, smalli iter); inline bool distance_query( idi cand_root_id, idi v_id, idi roots_start, const vector<IndexType> &L, const vector<vector<smalli> > &dist_matrix, smalli iter); inline void insert_label_only( idi cand_root_id, idi v_id, idi roots_start, inti roots_size, vector<IndexType> &L, vector<vector<smalli> > &dist_matrix, smalli iter); inline void update_label_indices( idi v_id, idi inserted_count, vector<IndexType> &L, vector<ShortIndex> &short_index, idi b_id, smalli iter); inline void reset_at_end( idi roots_start, inti roots_size, vector<IndexType> &L, vector<vector<smalli> > &dist_matrix); // Some parallel interfaces inline idi prefix_sum_for_offsets( vector<idi> &offsets); template<typename T> inline void collect_into_queue( vector<T> &tmp_queue, vector<idi> &offsets_tmp_queue, // the locations in tmp_queue for writing from tmp_queue vector<idi> &offsets_queue, // the locations in queue for writing into queue. 
idi num_elements, // total number of elements which need to be added from tmp_queue to queue vector<T> &queue, idi &end_queue); template<typename T, typename Int> inline void TS_enqueue( vector<T> &queue, Int &end_queue, const T &e); // Test only // uint64_t normal_hit_count = 0; // uint64_t bp_hit_count = 0; // uint64_t total_check_count = 0; // double initializing_time = 0; // double candidating_time = 0; // double adding_time = 0; // double distance_query_time = 0; // double init_index_time = 0; // double init_dist_matrix_time = 0; // double init_start_reset_time = 0; // double init_indicators_time = 0; //#ifdef PROFILE // vector<double> thds_adding_time = vector<double>(80, 0.0); // vector<uint64_t> thds_adding_count = vector<uint64_t>(80, 0); // L2CacheMissRate cache_miss; //#endif // vector<ShortIndex> tmp_short_index; // vector<ShortIndex> now_short_index; // End test public: ParaVertexCentricPLL() = default; ParaVertexCentricPLL(const Graph &G); weighti query( idi u, idi v); void print(); void switch_labels_to_old_id( const vector<idi> &rank2id, const vector<idi> &rank); void store_index_to_file( const char *filename, const vector<idi> &rank); void load_index_from_file( const char *filename); void order_labels( const vector<idi> &rank2id, const vector<idi> &rank); weighti query_distance( idi a, idi b); }; // class ParaVertexCentricPLL template<inti BATCH_SIZE> const inti ParaVertexCentricPLL<BATCH_SIZE>::BITPARALLEL_SIZE; template<inti BATCH_SIZE> ParaVertexCentricPLL<BATCH_SIZE>::ParaVertexCentricPLL(const Graph &G) { construct(G); } template<inti BATCH_SIZE> inline void ParaVertexCentricPLL<BATCH_SIZE>::bit_parallel_labeling( const Graph &G, vector<IndexType> &L, vector<uint8_t> &used_bp_roots) // CAS needs array { idi num_v = G.get_num_v(); idi num_e = G.get_num_e(); if (num_v <= BITPARALLEL_SIZE) { // if (true) {} // Sequential version std::vector<weighti> tmp_d(num_v); // distances from the root to every v std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} std::vector<idi> que(num_v); // active queue std::vector<std::pair<idi, idi> > sibling_es( num_e); // siblings, their distances to the root are equal (have difference of 0) std::vector<std::pair<idi, idi> > child_es( num_e); // child and father, their distances to the root have difference of 1. idi r = 0; // root r for (inti i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) { while (r < num_v && used_bp_roots[r]) { ++r; } if (r == num_v) { for (idi v = 0; v < num_v; ++v) { L[v].bp_dist[i_bpspt] = SMALLI_MAX; } continue; } used_bp_roots[r] = 1; fill(tmp_d.begin(), tmp_d.end(), SMALLI_MAX); fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); idi que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; tmp_d[r] = 0; que_t1 = que_h; int ns = 0; // number of selected neighbor, default 64 // the edge of one vertex in G is ordered decreasingly to rank, lower rank first, so here need to traverse edges backward // There was a bug cost countless time: the unsigned iterator i might decrease to zero and then flip to the INF. 
// idi i_bound = G.vertices[r] - 1; // idi i_start = i_bound + G.out_degrees[r]; // for (idi i = i_start; i > i_bound; --i) {} idi d_i_bound = G.out_degrees[r]; idi i_start = G.vertices[r] + d_i_bound - 1; for (idi d_i = 0; d_i < d_i_bound; ++d_i) { idi i = i_start - d_i; idi v = G.out_edges[i]; if (!used_bp_roots[v]) { used_bp_roots[v] = 1; // Algo3:line4: for every v in S_r, (dist[v], S_r^{-1}[v], S_r^{0}[v]) <- (1, {v}, empty_set) que[que_h++] = v; tmp_d[v] = 1; tmp_s[v].first = 1ULL << ns; if (++ns == 64) break; } } for (weighti d = 0; que_t0 < que_h; ++d) { idi num_sibling_es = 0, num_child_es = 0; for (idi que_i = que_t0; que_i < que_t1; ++que_i) { idi v = que[que_i]; idi i_start = G.vertices[v]; idi i_bound = i_start + G.out_degrees[v]; for (idi i = i_start; i < i_bound; ++i) { idi tv = G.out_edges[i]; weighti td = d + 1; if (d > tmp_d[tv]) { ; } else if (d == tmp_d[tv]) { if (v < tv) { // ??? Why need v < tv !!! Because it's a undirected graph. sibling_es[num_sibling_es].first = v; sibling_es[num_sibling_es].second = tv; ++num_sibling_es; // tmp_s[v].second |= tmp_s[tv].first; // tmp_s[tv].second |= tmp_s[v].first; } } else { // d < tmp_d[tv] if (tmp_d[tv] == SMALLI_MAX) { que[que_h++] = tv; tmp_d[tv] = td; } child_es[num_child_es].first = v; child_es[num_child_es].second = tv; ++num_child_es; // tmp_s[tv].first |= tmp_s[v].first; // tmp_s[tv].second |= tmp_s[v].second; } } } for (idi i = 0; i < num_sibling_es; ++i) { idi v = sibling_es[i].first, w = sibling_es[i].second; tmp_s[v].second |= tmp_s[w].first; tmp_s[w].second |= tmp_s[v].first; } for (idi i = 0; i < num_child_es; ++i) { idi v = child_es[i].first, c = child_es[i].second; tmp_s[c].first |= tmp_s[v].first; tmp_s[c].second |= tmp_s[v].second; } que_t0 = que_t1; que_t1 = que_h; } for (idi v = 0; v < num_v; ++v) { L[v].bp_dist[i_bpspt] = tmp_d[v]; L[v].bp_sets[i_bpspt][0] = tmp_s[v].first; // S_r^{-1} L[v].bp_sets[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1} } } } else { // Parallel version: Naive parallel enqueue std::vector<weighti> tmp_d(num_v); // distances from the root to every v std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} std::vector<idi> que(num_v); // active queue std::vector<std::pair<idi, idi> > sibling_es( num_e); // siblings, their distances to the root are equal (have difference of 0) std::vector<std::pair<idi, idi> > child_es( num_e); // child and father, their distances to the root have difference of 1. idi r = 0; // root r for (inti i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) { while (r < num_v && used_bp_roots[r]) { ++r; } if (r == num_v) { for (idi v = 0; v < num_v; ++v) { L[v].bp_dist[i_bpspt] = SMALLI_MAX; } continue; } used_bp_roots[r] = 1; fill(tmp_d.begin(), tmp_d.end(), SMALLI_MAX); fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); idi que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; tmp_d[r] = 0; que_t1 = que_h; int ns = 0; // number of selected neighbor, default 64 // the edge of one vertex in G is ordered decreasingly to rank, lower rank first, so here need to traverse edges backward // There was a bug cost countless time: the unsigned iterator i might decrease to zero and then flip to the INF. 
// idi i_bound = G.vertices[r] - 1; // idi i_start = i_bound + G.out_degrees[r]; // for (idi i = i_start; i > i_bound; --i) {} idi d_i_bound = G.out_degrees[r]; idi i_start = G.vertices[r] + d_i_bound - 1; for (idi d_i = 0; d_i < d_i_bound; ++d_i) { idi i = i_start - d_i; idi v = G.out_edges[i]; if (!used_bp_roots[v]) { used_bp_roots[v] = 1; // Algo3:line4: for every v in S_r, (dist[v], S_r^{-1}[v], S_r^{0}[v]) <- (1, {v}, empty_set) que[que_h++] = v; tmp_d[v] = 1; tmp_s[v].first = 1ULL << ns; if (++ns == 64) break; } } for (weighti d = 0; que_t0 < que_h; ++d) { idi num_sibling_es = 0, num_child_es = 0; for (idi que_i = que_t0; que_i < que_t1; ++que_i) { idi v = que[que_i]; idi i_start = G.vertices[v]; idi i_bound = i_start + G.out_degrees[v]; for (idi i = i_start; i < i_bound; ++i) { idi tv = G.out_edges[i]; weighti td = d + 1; if (d > tmp_d[tv]) { ; } else if (d == tmp_d[tv]) { if (v < tv) { // ??? Why need v < tv !!! Because it's a undirected graph. sibling_es[num_sibling_es].first = v; sibling_es[num_sibling_es].second = tv; ++num_sibling_es; // tmp_s[v].second |= tmp_s[tv].first; // tmp_s[tv].second |= tmp_s[v].first; } } else { // d < tmp_d[tv] if (tmp_d[tv] == SMALLI_MAX) { que[que_h++] = tv; tmp_d[tv] = td; } child_es[num_child_es].first = v; child_es[num_child_es].second = tv; ++num_child_es; // tmp_s[tv].first |= tmp_s[v].first; // tmp_s[tv].second |= tmp_s[v].second; } } } for (idi i = 0; i < num_sibling_es; ++i) { idi v = sibling_es[i].first, w = sibling_es[i].second; tmp_s[v].second |= tmp_s[w].first; tmp_s[w].second |= tmp_s[v].first; } for (idi i = 0; i < num_child_es; ++i) { idi v = child_es[i].first, c = child_es[i].second; tmp_s[c].first |= tmp_s[v].first; tmp_s[c].second |= tmp_s[v].second; } que_t0 = que_t1; que_t1 = que_h; } #pragma omp parallel for for (idi v = 0; v < num_v; ++v) { L[v].bp_dist[i_bpspt] = tmp_d[v]; // L[v].bp_sets_0[i_bpspt] = tmp_s[v].first; // S_r^{-1} // L[v].bp_sets_1[i_bpspt] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1} L[v].bp_sets[i_bpspt][0] = tmp_s[v].first; // S_r^{-1} L[v].bp_sets[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1} } } } } //inline void ParaVertexCentricPLL::bit_parallel_labeling( // const Graph &G, // vector<IndexType> &L, // vector<uint8_t> &used_bp_roots) //{ // idi num_v = G.get_num_v(); // idi num_e = G.get_num_e(); // //// std::vector<smalli> tmp_d(num_v); // distances from the root to every v // smalli *tmp_d = (smalli *) malloc(num_v * sizeof(smalli)); // std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} // std::vector<idi> que(num_v); // active queue // std::vector< std::pair<idi, idi> > sibling_es(num_e); // siblings, their distances to the root are equal (have difference of 0) // std::vector< std::pair<idi, idi> > child_es(num_e); // child and father, their distances to the root have difference of 1. 
// // idi r = 0; // root r // for (inti i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) { // while (r < num_v && used_bp_roots[r]) { // ++r; // } // if (r == num_v) { // for (idi v = 0; v < num_v; ++v) { // L[v].bp_dist[i_bpspt] = SMALLI_MAX; // } // continue; // } // used_bp_roots[r] = 1; // //// fill(tmp_d.begin(), tmp_d.end(), SMALLI_MAX); // memset(tmp_d, (uint8_t) -1, num_v * sizeof(smalli)); // fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); // // idi que_t0 = 0, que_t1 = 0, que_h = 0; // que[que_h++] = r; // tmp_d[r] = 0; // que_t1 = que_h; // // int ns = 0; // number of selected neighbor, default 64 // // the edge of one vertex in G is ordered decreasingly to rank, lower rank first, so here need to traverse edges backward // idi i_bound = G.vertices[r] - 1; // idi i_start = i_bound + G.out_degrees[r]; // for (idi i = i_start; i > i_bound; --i) { // idi v = G.out_edges[i]; // if (!used_bp_roots[v]) { // used_bp_roots[v] = 1; // // Algo3:line4: for every v in S_r, (dist[v], S_r^{-1}[v], S_r^{0}[v]) <- (1, {v}, empty_set) // que[que_h++] = v; // tmp_d[v] = 1; // tmp_s[v].first = 1ULL << ns; // if (++ns == 64) break; // } // } // // for (smalli d = 0; que_t0 < que_h; ++d) { // idi num_sibling_es = 0, num_child_es = 0; // // // For parallel adding to que // idi que_size = que_t1 - que_t0; // vector<idi> offsets_tmp_queue(que_size); //#pragma omp parallel for // for (idi i_q = 0; i_q < que_size; ++i_q) { // offsets_tmp_queue[i_q] = G.out_degrees[que[que_t0 + i_q]]; // } // idi num_neighbors = prefix_sum_for_offsets(offsets_tmp_queue); // vector<idi> tmp_que(num_neighbors); // vector<idi> sizes_tmp_que(que_size, 0); // // For parallel adding to sibling_es // vector< pair<idi, idi> > tmp_sibling_es(num_neighbors); // vector<idi> sizes_tmp_sibling_es(que_size, 0); // // For parallel adding to child_es // vector< pair<idi, idi> > tmp_child_es(num_neighbors); // vector<idi> sizes_tmp_child_es(que_size, 0); // //#pragma omp parallel for // for (idi que_i = que_t0; que_i < que_t1; ++que_i) { // idi tmp_que_i = que_i - que_t0; // location in the tmp_que // idi v = que[que_i]; // idi i_start = G.vertices[v]; // idi i_bound = i_start + G.out_degrees[v]; // for (idi i = i_start; i < i_bound; ++i) { // idi tv = G.out_edges[i]; // smalli td = d + 1; // // if (d > tmp_d[tv]) { // ; // } // else if (d == tmp_d[tv]) { // if (v < tv) { // ??? Why need v < tv !!! Because it's a undirected graph. 
// idi &size_in_group = sizes_tmp_sibling_es[tmp_que_i]; // tmp_sibling_es[offsets_tmp_queue[tmp_que_i] + size_in_group].first = v; // tmp_sibling_es[offsets_tmp_queue[tmp_que_i] + size_in_group].second = tv; // ++size_in_group; //// sibling_es[num_sibling_es].first = v; //// sibling_es[num_sibling_es].second = tv; //// ++num_sibling_es; // } // } else { // d < tmp_d[tv] // if (tmp_d[tv] == SMALLI_MAX) { // if (CAS(tmp_d + tv, SMALLI_MAX, td)) { // tmp_d[tv] = td // tmp_que[offsets_tmp_queue[tmp_que_i] + sizes_tmp_que[tmp_que_i]++] = tv; // } // } //// if (tmp_d[tv] == SMALLI_MAX) { //// que[que_h++] = tv; //// tmp_d[tv] = td; //// } // idi &size_in_group = sizes_tmp_child_es[tmp_que_i]; // tmp_child_es[offsets_tmp_queue[tmp_que_i] + size_in_group].first = v; // tmp_child_es[offsets_tmp_queue[tmp_que_i] + size_in_group].second = tv; // ++size_in_group; //// child_es[num_child_es].first = v; //// child_es[num_child_es].second = tv; //// ++num_child_es; // } // } // } // // // From tmp_sibling_es to sibling_es // idi total_sizes_tmp_queue = prefix_sum_for_offsets(sizes_tmp_sibling_es); // collect_into_queue( // tmp_sibling_es, // offsets_tmp_queue, // sizes_tmp_sibling_es, // total_sizes_tmp_queue, // sibling_es, // num_sibling_es); // //#pragma omp parallel for // for (idi i = 0; i < num_sibling_es; ++i) { // idi v = sibling_es[i].first, w = sibling_es[i].second; // __sync_or_and_fetch(&tmp_s[v].second, tmp_s[w].first); // __sync_or_and_fetch(&tmp_s[w].second, tmp_s[v].first); //// tmp_s[v].second |= tmp_s[w].first; //// tmp_s[w].second |= tmp_s[v].first; // } // // // From tmp_child_es to child_es // total_sizes_tmp_queue = prefix_sum_for_offsets(sizes_tmp_child_es); // collect_into_queue( // tmp_child_es, // offsets_tmp_queue, // sizes_tmp_child_es, // total_sizes_tmp_queue, // child_es, // num_child_es); // //#pragma omp parallel for // for (idi i = 0; i < num_child_es; ++i) { // idi v = child_es[i].first, c = child_es[i].second; // __sync_or_and_fetch(&tmp_s[c].first, tmp_s[v].first); // __sync_or_and_fetch(&tmp_s[c].second, tmp_s[v].second); //// tmp_s[c].first |= tmp_s[v].first; //// tmp_s[c].second |= tmp_s[v].second; // } // // // From tmp_que to que // total_sizes_tmp_queue = prefix_sum_for_offsets(sizes_tmp_que); // collect_into_queue( // tmp_que, // offsets_tmp_queue, // sizes_tmp_que, // total_sizes_tmp_queue, // que, // que_h); // // que_t0 = que_t1; // que_t1 = que_h; // } // //#pragma omp parallel for // for (idi v = 0; v < num_v; ++v) { // L[v].bp_dist[i_bpspt] = tmp_d[v]; // L[v].bp_sets[i_bpspt][0] = tmp_s[v].first; // S_r^{-1} // L[v].bp_sets[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1} // } // } // // free(tmp_d); //} // Function for initializing at the begin of a batch // For a batch, initialize the temporary labels and real labels of roots; // traverse roots' labels to initialize distance buffer; // unset flag arrays is_active and got_labels template<inti BATCH_SIZE> inline void ParaVertexCentricPLL<BATCH_SIZE>::initialize( vector<ShortIndex> &short_index, vector<vector<smalli> > &dist_matrix, vector<idi> &active_queue, idi &end_active_queue, vector<idi> &once_candidated_queue, idi &end_once_candidated_queue, // vector<bool> &once_candidated, vector<uint8_t> &once_candidated, idi b_id, idi roots_start, inti roots_size, vector<IndexType> &L, const vector<uint8_t> &used_bp_roots) { idi roots_bound = roots_start + roots_size; // init_start_reset_time -= WallTimer::get_time_mark(); // TODO: parallel 
enqueue { // active_queue for (idi r_real_id = roots_start; r_real_id < roots_bound; ++r_real_id) { if (!used_bp_roots[r_real_id]) { active_queue[end_active_queue++] = r_real_id; } } } // init_start_reset_time += WallTimer::get_time_mark(); // init_index_time -= WallTimer::get_time_mark(); // Short_index { // init_indicators_time -= WallTimer::get_time_mark(); if (end_once_candidated_queue >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (idi v_i = 0; v_i < end_once_candidated_queue; ++v_i) { idi v = once_candidated_queue[v_i]; short_index[v].indicator.reset(); once_candidated[v] = 0; } } else { for (idi v_i = 0; v_i < end_once_candidated_queue; ++v_i) { idi v = once_candidated_queue[v_i]; short_index[v].indicator.reset(); once_candidated[v] = 0; } } //#pragma omp parallel for // for (idi v_i = 0; v_i < end_once_candidated_queue; ++v_i) { // idi v = once_candidated_queue[v_i]; // short_index[v].indicator.reset(); // once_candidated[v] = 0; // } end_once_candidated_queue = 0; if (roots_size >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (idi v = roots_start; v < roots_bound; ++v) { if (!used_bp_roots[v]) { short_index[v].indicator.set(v - roots_start); short_index[v].indicator.set(BATCH_SIZE); // v got labels } } } else { for (idi v = roots_start; v < roots_bound; ++v) { if (!used_bp_roots[v]) { short_index[v].indicator.set(v - roots_start); short_index[v].indicator.set(BATCH_SIZE); // v got labels } } } // for (idi v = roots_start; v < roots_bound; ++v) { // if (!used_bp_roots[v]) { // short_index[v].indicator.set(v - roots_start); // short_index[v].indicator.set(BATCH_SIZE); // v got labels // } // } // init_indicators_time += WallTimer::get_time_mark(); } // // Real Index { if (roots_size >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (idi r_id = 0; r_id < roots_size; ++r_id) { if (used_bp_roots[r_id + roots_start]) { continue; } IndexType &Lr = L[r_id + roots_start]; Lr.batches.push_back(IndexType::Batch( b_id, // Batch ID Lr.distances.size(), // start_index 1)); // size Lr.distances.push_back(IndexType::DistanceIndexType( Lr.vertices.size(), // start_index 1, // size 0)); // dist Lr.vertices.push_back(r_id); } } else { for (idi r_id = 0; r_id < roots_size; ++r_id) { if (used_bp_roots[r_id + roots_start]) { continue; } IndexType &Lr = L[r_id + roots_start]; Lr.batches.push_back(IndexType::Batch( b_id, // Batch ID Lr.distances.size(), // start_index 1)); // size Lr.distances.push_back(IndexType::DistanceIndexType( Lr.vertices.size(), // start_index 1, // size 0)); // dist Lr.vertices.push_back(r_id); } } // for (idi r_id = 0; r_id < roots_size; ++r_id) { // if (used_bp_roots[r_id + roots_start]) { // continue; // } // IndexType &Lr = L[r_id + roots_start]; // Lr.batches.push_back(IndexType::Batch( // b_id, // Batch ID // Lr.distances.size(), // start_index // 1)); // size // Lr.distances.push_back(IndexType::DistanceIndexType( // Lr.vertices.size(), // start_index // 1, // size // 0)); // dist // Lr.vertices.push_back(r_id); // } } // init_index_time += WallTimer::get_time_mark(); // init_dist_matrix_time -= WallTimer::get_time_mark(); // Dist_matrix { if (roots_size >= THRESHOLD_PARALLEL) { // schedule dynamic is slower #pragma omp parallel for for (idi r_id = 0; r_id < roots_size; ++r_id) { if (used_bp_roots[r_id + roots_start]) { continue; } IndexType &Lr = L[r_id + roots_start]; inti b_i_bound = Lr.batches.size(); _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); for (inti b_i = 0; 
b_i < b_i_bound; ++b_i) { idi id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; idi dist_start_index = Lr.batches[b_i].start_index; idi dist_bound_index = dist_start_index + Lr.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { idi v_start_index = Lr.distances[dist_i].start_index; idi v_bound_index = v_start_index + Lr.distances[dist_i].size; smalli dist = Lr.distances[dist_i].dist; for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = dist; } } } } } else { inti b_i_bound; idi id_offset; idi dist_start_index; idi dist_bound_index; idi v_start_index; idi v_bound_index; smalli dist; for (idi r_id = 0; r_id < roots_size; ++r_id) { if (used_bp_roots[r_id + roots_start]) { continue; } IndexType &Lr = L[r_id + roots_start]; b_i_bound = Lr.batches.size(); _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); for (inti b_i = 0; b_i < b_i_bound; ++b_i) { id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; dist_start_index = Lr.batches[b_i].start_index; dist_bound_index = dist_start_index + Lr.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { v_start_index = Lr.distances[dist_i].start_index; v_bound_index = v_start_index + Lr.distances[dist_i].size; dist = Lr.distances[dist_i].dist; for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = dist; } } } } } // inti b_i_bound; // idi id_offset; // idi dist_start_index; // idi dist_bound_index; // idi v_start_index; // idi v_bound_index; // smalli dist; // for (idi r_id = 0; r_id < roots_size; ++r_id) { // if (used_bp_roots[r_id + roots_start]) { // continue; // } // IndexType &Lr = L[r_id + roots_start]; // b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // for (inti b_i = 0; b_i < b_i_bound; ++b_i) { // id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // dist_start_index = Lr.batches[b_i].start_index; // dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse dist_matrix // for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // v_start_index = Lr.distances[dist_i].start_index; // v_bound_index = v_start_index + Lr.distances[dist_i].size; // dist = Lr.distances[dist_i].dist; // for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { // dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = dist; // } // } // } // } } // init_dist_matrix_time += WallTimer::get_time_mark(); } // Function that pushes v_head's labels to v_head's every neighbor template<inti BATCH_SIZE> inline void ParaVertexCentricPLL<BATCH_SIZE>::push_labels( idi v_head, idi roots_start, const Graph &G, const vector<IndexType> &L, vector<ShortIndex> &short_index, // vector<idi> &candidate_queue, // idi &end_candidate_queue, vector<idi> &tmp_candidate_queue, idi &size_tmp_candidate_queue, const idi offset_tmp_queue, // idi &offset_tmp_queue, // vector<bool> &got_candidates, vector<uint8_t> &got_candidates, // vector<idi> &once_candidated_queue, // idi &end_once_candidated_queue, vector<idi> &tmp_once_candidated_queue, idi &size_tmp_once_candidated_queue, // vector<bool> &once_candidated, vector<uint8_t> &once_candidated, const vector<uint8_t> &used_bp_roots, smalli iter) { const IndexType &Lv = L[v_head]; // 
These 2 index are used for traversing v_head's last inserted labels idi l_i_start = Lv.distances.rbegin()->start_index; idi l_i_bound = l_i_start + Lv.distances.rbegin()->size; // Traverse v_head's every neighbor v_tail idi e_i_start = G.vertices[v_head]; idi e_i_bound = e_i_start + G.out_degrees[v_head]; for (idi e_i = e_i_start; e_i < e_i_bound; ++e_i) { idi v_tail = G.out_edges[e_i]; if (used_bp_roots[v_head]) { continue; } if (v_tail < roots_start) { // v_tail has higher rank than any roots, then no roots can push new labels to it. return; } // if (v_tail <= Lv.vertices[l_i_start] + roots_start) { // v_tail has higher rank than any v_head's labels // return; // } // This condition cannot be used anymore since v_head's last inserted labels are not ordered from higher rank to lower rank now, because v_head's candidate set is a queue now rather than a bitmap. For a queue, its order of candidates are not ordered by ranks. const IndexType &L_tail = L[v_tail]; _mm_prefetch(&L_tail.bp_dist[0], _MM_HINT_T0); _mm_prefetch(&L_tail.bp_sets[0][0], _MM_HINT_T0); // Traverse v_head's last inserted labels for (idi l_i = l_i_start; l_i < l_i_bound; ++l_i) { inti label_root_id = Lv.vertices[l_i]; idi label_real_id = label_root_id + roots_start; if (v_tail <= label_real_id) { // v_tail has higher rank than all remaining labels // For candidates_que, this is not true any more! // break; continue; } ShortIndex &SI_v_tail = short_index[v_tail]; if (SI_v_tail.indicator[label_root_id]) { // The label is already selected before continue; } // Record label_root_id as once selected by v_tail SI_v_tail.indicator.set(label_root_id); // {// Deal with data race // volatile char lock = 0; // if (CAS((void *) (&lock), static_cast<char>(0), static_cast<char>(1))) { // SI_v_tail.indicator.set(label_root_id); // } else { // continue; // } // } {//test // Check v_tail's indicator if (!SI_v_tail.indicator[label_root_id]) { printf("L%u: B%u short_index[%u].indicator[%u]: %u which should be 1.\n", __LINE__, roots_start / BATCH_SIZE, v_tail, label_real_id, (idi) SI_v_tail.indicator[label_root_id]); } // else { // printf("L%u: T%u: B%u: l_i: %u " // "short_index[%u].indicator[%u]: %u\n", // __LINE__, omp_get_thread_num(), roots_start / BATCH_SIZE, l_i, // v_tail, label_real_id, (idi) SI_v_tail.indicator[label_root_id]); // } } // Add into once_candidated_queue if (!once_candidated[v_tail]) { // If v_tail is not in the once_candidated_queue yet, add it in if (CAS(&once_candidated[v_tail], (uint8_t) 0, (uint8_t) 1)) { tmp_once_candidated_queue[offset_tmp_queue + size_tmp_once_candidated_queue++] = v_tail; } } // CHANGED! // Bit Parallel Checking: if label_real_id to v_tail has shorter distance already // ++total_check_count; const IndexType &L_label = L[label_real_id]; bool no_need_add = false; _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0); _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0); for (inti i = 0; i < BITPARALLEL_SIZE; ++i) { inti td = L_label.bp_dist[i] + L_tail.bp_dist[i]; if (td - 2 <= iter) { td += (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 : ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) | (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0])) ? 
-1 : 0; if (td <= iter) { no_need_add = true; // ++bp_hit_count; break; } } } if (no_need_add) { continue; } // Record vertex label_root_id as v_tail's candidates label // SI_v_tail.candidates.set(label_root_id); // if (!SI_v_tail.is_candidate[label_root_id]) { // SI_v_tail.is_candidate[label_root_id] = true; // SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id; // } if (!SI_v_tail.is_candidate[label_root_id]) { if (CAS(&SI_v_tail.is_candidate[label_root_id], (uint8_t) 0, (uint8_t) 1)) { TS_enqueue(SI_v_tail.candidates_que, SI_v_tail.end_candidates_que, label_root_id); // volatile inti old_v = SI_v_tail.end_candidates_que; // volatile inti new_v = old_v + 1; // while (!CAS(&SI_v_tail.end_candidates_que, old_v, new_v)) { // old_v = SI_v_tail.end_candidates_que; // new_v = old_v + 1; // } // SI_v_tail.candidates_que[old_v] = label_root_id; { SI_v_tail.indicator.set(label_root_id); } {//test // Check v_tail's indicator if (!SI_v_tail.indicator[label_root_id]) { printf("L%u: T%u: B%u: l_i: %u iter: %u " "short_index[%u].indicator[%u]: %u which should be 1.\n", __LINE__, omp_get_thread_num(), roots_start / BATCH_SIZE, l_i, iter, v_tail, label_real_id, (idi) SI_v_tail.indicator[label_root_id]); } } } } // Add into candidate_queue if (!got_candidates[v_tail]) { // If v_tail is not in candidate_queue, add it in (prevent duplicate) if (CAS(&got_candidates[v_tail], (uint8_t) 0, (uint8_t) 1)) { tmp_candidate_queue[offset_tmp_queue + size_tmp_candidate_queue++] = v_tail; } } // // Add into once_candidated_queue //#pragma omp critical // if (!once_candidated[v_tail]) { // // If v_tail is not in the once_candidated_queue yet, add it in // once_candidated[v_tail] = true; // once_candidated_queue[end_once_candidated_queue++] = v_tail; // } // // Add into candidate_queue // if (!got_candidates[v_tail]) { // // If v_tail is not in candidate_queue, add it in (prevent duplicate) // got_candidates[v_tail] = true; // candidate_queue[end_candidate_queue++] = v_tail; // } } } // printf("v_head: %u, size_tmp_candidate_queue: %u\n", v_head, size_tmp_candidate_queue);//test } // Function for distance query; // traverse vertex v_id's labels; // return the distance between v_id and cand_root_id based on existing labels. // return false if shorter distance exists already, return true if the cand_root_id can be added into v_id's label. template<inti BATCH_SIZE> inline bool ParaVertexCentricPLL<BATCH_SIZE>::distance_query( idi cand_root_id, idi v_id, idi roots_start, const vector<IndexType> &L, const vector<vector<smalli> > &dist_matrix, smalli iter) { // ++total_check_count; // distance_query_time -= WallTimer::get_time_mark(); idi cand_real_id = cand_root_id + roots_start; const IndexType &Lv = L[v_id]; // Traverse v_id's all existing labels inti b_i_bound = Lv.batches.size(); _mm_prefetch(&Lv.batches[0], _MM_HINT_T0); _mm_prefetch(&Lv.distances[0], _MM_HINT_T0); _mm_prefetch(&Lv.vertices[0], _MM_HINT_T0); _mm_prefetch(&dist_matrix[cand_root_id][0], _MM_HINT_T0); for (inti b_i = 0; b_i < b_i_bound; ++b_i) { idi id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE; idi dist_start_index = Lv.batches[b_i].start_index; idi dist_bound_index = dist_start_index + Lv.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { inti dist = Lv.distances[dist_i].dist; if (dist >= iter) { // In a batch, the labels' distances are increasingly ordered. 
// If the half path distance is already greater than their targeted distance, jump to next batch break; } idi v_start_index = Lv.distances[dist_i].start_index; idi v_bound_index = v_start_index + Lv.distances[dist_i].size; // _mm_prefetch(&dist_matrix[cand_root_id][0], _MM_HINT_T0); for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { idi v = Lv.vertices[v_i] + id_offset; // v is a label hub of v_id {//test if (v == cand_real_id) { printf("T%u: " "In distance_query: v_id %u had got (%u, %u) in B%u, but is being pushed (%u, %u) in B%u again.\n", omp_get_thread_num(), v_id, v, dist, Lv.batches[b_i].batch_id, cand_real_id, iter, roots_start / BATCH_SIZE); // printf("tmp_short_index[%u].indicator[%u]: %u " // "now_short_index[%u].indicator[%u]: %u\n", // v_id, cand_real_id, // (idi) tmp_short_index[v_id].indicator[cand_root_id], // v_id, cand_real_id, // (idi) now_short_index[v_id].indicator[cand_root_id]); } } if (v >= cand_real_id) { // Vertex cand_real_id cannot have labels whose ranks are lower than it, // in which case dist_matrix[cand_root_id][v] does not exit. continue; } inti d_tmp = dist + dist_matrix[cand_root_id][v]; {//test if (v == cand_real_id) { printf("d_tmp: %u dist: %u dist_matrix[%u][%u]: %u\n", d_tmp, dist, cand_real_id, v, dist_matrix[cand_root_id][v]); } } if (d_tmp <= iter) { // distance_query_time += WallTimer::get_time_mark(); // ++normal_hit_count; return false; } } } } // distance_query_time += WallTimer::get_time_mark(); return true; } // Function inserts candidate cand_root_id into vertex v_id's labels; // update the distance buffer dist_matrix; // but it only update the v_id's labels' vertices array; template<inti BATCH_SIZE> inline void ParaVertexCentricPLL<BATCH_SIZE>::insert_label_only( idi cand_root_id, idi v_id, idi roots_start, inti roots_size, vector<IndexType> &L, vector<vector<smalli> > &dist_matrix, smalli iter) { L[v_id].vertices.push_back(cand_root_id); // Update the distance buffer if necessary idi v_root_id = v_id - roots_start; if (v_id >= roots_start && v_root_id < roots_size) { dist_matrix[v_root_id][cand_root_id + roots_start] = iter; } } // Function updates those index arrays in v_id's label only if v_id has been inserted new labels template<inti BATCH_SIZE> inline void ParaVertexCentricPLL<BATCH_SIZE>::update_label_indices( idi v_id, idi inserted_count, vector<IndexType> &L, vector<ShortIndex> &short_index, idi b_id, smalli iter) { IndexType &Lv = L[v_id]; // indicator[BATCH_SIZE + 1] is true, means v got some labels already in this batch if (short_index[v_id].indicator[BATCH_SIZE]) { // Increase the batches' last element's size because a new distance element need to be added ++(Lv.batches.rbegin()->size); } else { short_index[v_id].indicator.set(BATCH_SIZE); // Insert a new Batch with batch_id, start_index, and size because a new distance element need to be added Lv.batches.push_back(IndexType::Batch( b_id, Lv.distances.size(), 1)); } // Insert a new distance element with start_index, size, and dist Lv.distances.push_back(IndexType::DistanceIndexType( Lv.vertices.size() - inserted_count, inserted_count, iter)); } // Function to reset dist_matrix the distance buffer to INF // Traverse every root's labels to reset its distance buffer elements to INF. // In this way to reduce the cost of initialization of the next batch. 
template<inti BATCH_SIZE> inline void ParaVertexCentricPLL<BATCH_SIZE>::reset_at_end( idi roots_start, inti roots_size, vector<IndexType> &L, vector<vector<smalli> > &dist_matrix) { if (roots_size >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (idi r_id = 0; r_id < roots_size; ++r_id) { IndexType &Lr = L[r_id + roots_start]; inti b_i_bound = Lr.batches.size(); _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); for (inti b_i = 0; b_i < b_i_bound; ++b_i) { idi id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; idi dist_start_index = Lr.batches[b_i].start_index; idi dist_bound_index = dist_start_index + Lr.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { idi v_start_index = Lr.distances[dist_i].start_index; idi v_bound_index = v_start_index + Lr.distances[dist_i].size; for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = SMALLI_MAX; } } } } } else { inti b_i_bound; idi id_offset; idi dist_start_index; idi dist_bound_index; idi v_start_index; idi v_bound_index; for (idi r_id = 0; r_id < roots_size; ++r_id) { IndexType &Lr = L[r_id + roots_start]; b_i_bound = Lr.batches.size(); _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); for (inti b_i = 0; b_i < b_i_bound; ++b_i) { id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; dist_start_index = Lr.batches[b_i].start_index; dist_bound_index = dist_start_index + Lr.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { v_start_index = Lr.distances[dist_i].start_index; v_bound_index = v_start_index + Lr.distances[dist_i].size; for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = SMALLI_MAX; } } } } } // inti b_i_bound; // idi id_offset; // idi dist_start_index; // idi dist_bound_index; // idi v_start_index; // idi v_bound_index; // for (idi r_id = 0; r_id < roots_size; ++r_id) { // IndexType &Lr = L[r_id + roots_start]; // b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // for (inti b_i = 0; b_i < b_i_bound; ++b_i) { // id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // dist_start_index = Lr.batches[b_i].start_index; // dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse dist_matrix // for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // v_start_index = Lr.distances[dist_i].start_index; // v_bound_index = v_start_index + Lr.distances[dist_i].size; // for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { // dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = SMALLI_MAX; // } // } // } // } } template<inti BATCH_SIZE> inline void ParaVertexCentricPLL<BATCH_SIZE>::batch_process( const Graph &G, idi b_id, idi roots_start, // start id of roots inti roots_size, // how many roots in the batch vector<IndexType> &L, const vector<uint8_t> &used_bp_roots, vector<idi> &active_queue, idi &end_active_queue, vector<idi> &candidate_queue, idi &end_candidate_queue, vector<ShortIndex> &short_index, vector<vector<smalli> > &dist_matrix, vector<uint8_t> &got_candidates, vector<uint8_t> &is_active, vector<idi> &once_candidated_queue, idi &end_once_candidated_queue, vector<uint8_t> 
&once_candidated) //inline void ParaVertexCentricPLL::batch_process( // const Graph &G, // idi b_id, // idi roots_start, // start id of roots // inti roots_size, // how many roots in the batch // vector<IndexType> &L, // const vector<bool> &used_bp_roots) { // initializing_time -= WallTimer::get_time_mark(); // static const idi num_v = G.get_num_v(); // static vector<idi> active_queue(num_v); // static idi end_active_queue = 0; // static vector<idi> candidate_queue(num_v); // static idi end_candidate_queue = 0; // static vector<ShortIndex> short_index(num_v); // static vector< vector<smalli> > dist_matrix(roots_size, vector<smalli>(num_v, SMALLI_MAX)); // static uint8_t *got_candidates = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // need raw integer type to do CAS. // static uint8_t *is_active = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // static vector<idi> once_candidated_queue(num_v); // The vertex who got some candidates in this batch is in the once_candidated_queue. // static idi end_once_candidated_queue = 0; // static uint8_t *once_candidated = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // need raw integer type to do CAS. // At the beginning of a batch, initialize the labels L and distance buffer dist_matrix; // printf("initializing...\n");//test initialize( short_index, dist_matrix, active_queue, end_active_queue, once_candidated_queue, end_once_candidated_queue, once_candidated, b_id, roots_start, roots_size, L, used_bp_roots); smalli iter = 0; // The iterator, also the distance for current iteration // initializing_time += WallTimer::get_time_mark(); {//test // now_short_index.assign(short_index.begin(), short_index.end()); } while (0 != end_active_queue) { // candidating_time -= WallTimer::get_time_mark(); ++iter; {//test // tmp_short_index.swap(now_short_index); } // Pushing // printf("pushing...\n");//test { // Prepare for parallel processing the active_queue and adding to candidate_queue. // Every vertex's offset location in tmp_candidate_queue // It's used for every thread to write into tmp_candidate_queue and tmp_once_candidated_queue vector<idi> offsets_tmp_queue(end_active_queue); #pragma omp parallel for for (idi i_queue = 0; i_queue < end_active_queue; ++i_queue) { // Traverse all active vertices, get their out degrees. offsets_tmp_queue[i_queue] = G.out_degrees[active_queue[i_queue]]; } idi num_neighbors = prefix_sum_for_offsets(offsets_tmp_queue); // every thread writes to tmp_candidate_queue at its offset location vector<idi> tmp_candidate_queue(num_neighbors); // A vector to store the true number of pushed neighbors of every active vertex. vector<idi> sizes_tmp_candidate_queue(end_active_queue, 0); // similarly, every thread writes to tmp_once_candidated_queue at its offset location vector<idi> tmp_once_candidated_queue(num_neighbors); // And store the true number of new added once-candidated vertices. 
vector<idi> sizes_tmp_once_candidated_queue(end_active_queue, 0); // Traverse active vertices to push their labels as candidates // schedule dynamic is slower #pragma omp parallel for //TODO: turn on OpenMP for (idi i_queue = 0; i_queue < end_active_queue; ++i_queue) { idi v_head = active_queue[i_queue]; is_active[v_head] = 0; // reset is_active push_labels( v_head, roots_start, G, L, short_index, // candidate_queue, // end_candidate_queue, tmp_candidate_queue, sizes_tmp_candidate_queue[i_queue], offsets_tmp_queue[i_queue], got_candidates, // once_candidated_queue, // end_once_candidated_queue, tmp_once_candidated_queue, sizes_tmp_once_candidated_queue[i_queue], once_candidated, used_bp_roots, iter); } {//test // now_short_index.assign(short_index.begin(), short_index.end()); } // According to sizes_tmp_candidate_queue, get the offset for inserting to the real queue idi total_new = prefix_sum_for_offsets(sizes_tmp_candidate_queue); // Collect all candidate vertices from tmp_candidate_queue into candidate_queue. collect_into_queue( tmp_candidate_queue, offsets_tmp_queue, // the locations in tmp_queue for writing from tmp_queue sizes_tmp_candidate_queue, // the locations in queue for writing into queue. total_new, // total number of elements which need to be added from tmp_queue to queue candidate_queue, end_candidate_queue); // Get the offset for inserting to the real queue. total_new = prefix_sum_for_offsets(sizes_tmp_once_candidated_queue); // Collect all once-candidated vertices from tmp_once_candidated_queue into once_candidated_queue collect_into_queue( tmp_once_candidated_queue, offsets_tmp_queue, sizes_tmp_once_candidated_queue, total_new, once_candidated_queue, end_once_candidated_queue); // printf("end_candidate_queue: %u\n", end_candidate_queue); fflush(stdout);//test end_active_queue = 0; // Set the active_queue empty } // candidating_time += WallTimer::get_time_mark(); if (end_candidate_queue == 0) { break; } // adding_time -= WallTimer::get_time_mark(); // Adding // printf("adding...\n");//test { ////////////////////////////////////////////////////////////////////////////////// // OpenMP Version // Prepare for parallel processing the candidate_queue and adding to active_queue. // Every vertex's offset location in tmp_active_queue is i_queue * roots_size // It's used for every thread to write into tmp_candidate_queue and tmp_once_candidated_queue vector<idi> offsets_tmp_queue(end_candidate_queue); #pragma omp parallel for for (idi i_queue = 0; i_queue < end_candidate_queue; ++i_queue) { // Traverse all active vertices, get their out degrees. // A ridiculous bug here. The v_id will, if any, only add itself to the active queue. //offsets_tmp_queue[i_queue] = i_queue * roots_size; offsets_tmp_queue[i_queue] = i_queue; } // every thread writes to tmp_candidate_queue at its offset location vector<idi> tmp_active_queue(end_candidate_queue); // A vector to store the true number of pushed neighbors of every active vertex. 
vector<idi> sizes_tmp_active_queue(end_candidate_queue, 0); // Traverse vertices in the candidate_queue to insert labels // Here schedule dynamic will be slower //#ifdef PROFILE // cache_miss.measure_start(); //#endif #pragma omp parallel for schedule(dynamic) for (idi i_queue = 0; i_queue < end_candidate_queue; ++i_queue) { //#ifdef PROFILE // inti tid = omp_get_thread_num(); // thds_adding_time[tid] -= WallTimer::get_time_mark(); //#endif idi v_id = candidate_queue[i_queue]; inti inserted_count = 0; //recording number of v_id's truly inserted candidates got_candidates[v_id] = 0; // reset got_candidates inti bound_cand_i = short_index[v_id].end_candidates_que; for (inti cand_i = 0; cand_i < bound_cand_i; ++cand_i) { inti cand_root_id = short_index[v_id].candidates_que[cand_i]; {//test // Check v_id's indicator if (!short_index[v_id].indicator[cand_root_id]) { printf("L%u: T%u: B%u: iter: %u " "short_index[%u].indicator[%u]: %u which should be 1.\n", __LINE__, omp_get_thread_num(), b_id, iter, v_id, cand_root_id + roots_start, (idi) short_index[v_id].indicator[cand_root_id]); } } short_index[v_id].is_candidate[cand_root_id] = 0; // Reset is_candidate // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance if (distance_query( cand_root_id, v_id, roots_start, L, dist_matrix, iter)) { if (!is_active[v_id]) { is_active[v_id] = 1; tmp_active_queue[offsets_tmp_queue[i_queue] + sizes_tmp_active_queue[i_queue]++] = v_id; } // if (!be_active) { // be_active = true; // } // if (!is_active[v_id]) { // is_active[v_id] = true; // active_queue[end_active_queue++] = v_id; // } ++inserted_count; // The candidate cand_root_id needs to be added into v_id's label insert_label_only( cand_root_id, v_id, roots_start, roots_size, L, dist_matrix, iter); {//test // // Check v_id's indicator if (!short_index[v_id].indicator[cand_root_id]) { printf("L:%u T%u: B%u iter: %u " "short_index[%u].indicator[%u]: %u which should be 1.\n", __LINE__, omp_get_thread_num(), b_id, iter, v_id, cand_root_id + roots_start, (idi) short_index[v_id].indicator[cand_root_id]); } // Traverse all v_id's labels and check if cand_root_id is there const IndexType &Lv = L[v_id]; inti b_i_bound = Lv.batches.size(); _mm_prefetch(&Lv.batches[0], _MM_HINT_T0); _mm_prefetch(&Lv.distances[0], _MM_HINT_T0); _mm_prefetch(&Lv.vertices[0], _MM_HINT_T0); _mm_prefetch(&dist_matrix[cand_root_id][0], _MM_HINT_T0); for (inti b_i = 0; b_i < b_i_bound; ++b_i) { idi id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE; idi dist_start_index = Lv.batches[b_i].start_index; idi dist_bound_index = dist_start_index + Lv.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { inti dist = Lv.distances[dist_i].dist; idi v_start_index = Lv.distances[dist_i].start_index; idi v_bound_index = v_start_index + Lv.distances[dist_i].size; for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { idi v = Lv.vertices[v_i] + id_offset; // v is a label hub of v_id if (v == cand_root_id + roots_start) { printf("! 
T%u: " "v_id %u already got (%u, %u), rather than (%u, %u)\n", omp_get_thread_num(), v_id, v, dist, cand_root_id + roots_start, iter); // exit(-1); } } } } } } } short_index[v_id].end_candidates_que = 0; // if (be_active) { // if (CAS(&is_active[v_id], (uint8_t) 0, (uint8_t) 1)) { // tmp_active_queue[offsets_tmp_queue[i_queue] + sizes_tmp_active_queue[i_queue]++] = v_id; // } // } if (0 != inserted_count) { // Update other arrays in L[v_id] if new labels were inserted in this iteration update_label_indices( v_id, inserted_count, L, short_index, b_id, iter); } } // According to sizes_tmp_active_queue, get the offset for inserting to the real queue idi total_new = prefix_sum_for_offsets(sizes_tmp_active_queue); // Collect all candidate vertices from tmp_candidate_queue into candidate_queue. collect_into_queue( tmp_active_queue, offsets_tmp_queue, // the locations in tmp_queue for writing from tmp_queue sizes_tmp_active_queue, // the locations in queue for writing into queue. total_new, // total number of elements which need to be added from tmp_queue to queue active_queue, end_active_queue); end_candidate_queue = 0; // Set the candidate_queue empty ////////////////////////////////////////////////////////////////////////////////// ////// Sequential version // for (idi i_queue = 0; i_queue < end_candidate_queue; ++i_queue) { // idi v_id = candidate_queue[i_queue]; // inti inserted_count = 0; //recording number of v_id's truly inserted candidates // got_candidates[v_id] = false; // reset got_candidates // // Traverse v_id's all candidates // inti bound_cand_i = short_index[v_id].end_candidates_que; // for (inti cand_i = 0; cand_i < bound_cand_i; ++cand_i) { // inti cand_root_id = short_index[v_id].candidates_que[cand_i]; // short_index[v_id].is_candidate[cand_root_id] = false; // // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance // if ( distance_query( // cand_root_id, // v_id, // roots_start, // L, // dist_matrix, // iter) ) { // if (!is_active[v_id]) { // is_active[v_id] = true; // active_queue[end_active_queue++] = v_id; // } // ++inserted_count; // // The candidate cand_root_id needs to be added into v_id's label // insert_label_only( // cand_root_id, // v_id, // roots_start, // roots_size, // L, // dist_matrix, // iter); // } // } // short_index[v_id].end_candidates_que = 0; //// } // if (0 != inserted_count) { // // Update other arrays in L[v_id] if new labels were inserted in this iteration // update_label_indices( // v_id, // inserted_count, // L, // short_index, // b_id, // iter); // } // } // end_candidate_queue = 0; // Set the candidate_queue empty ////////////////////////////////////////////////////////////////////////////////////// } // adding_time += WallTimer::get_time_mark(); } // Reset the dist_matrix // initializing_time -= WallTimer::get_time_mark(); // init_dist_matrix_time -= WallTimer::get_time_mark(); reset_at_end( roots_start, roots_size, L, dist_matrix); // init_dist_matrix_time += WallTimer::get_time_mark(); // initializing_time += WallTimer::get_time_mark(); // double total_time = time_can + time_add; // printf("Candidating time: %f (%f%%)\n", time_can, time_can / total_time * 100); // printf("Adding time: %f (%f%%)\n", time_add, time_add / total_time * 100); } template<inti BATCH_SIZE> void ParaVertexCentricPLL<BATCH_SIZE>::construct(const Graph &G) { // initializing_time -= WallTimer::get_time_mark(); idi num_v = G.get_num_v(); num_v_ = num_v; L.resize(num_v); idi remainer = num_v % BATCH_SIZE; idi b_i_bound = 
num_v / BATCH_SIZE; // uint8_t *used_bp_roots = (uint8_t *) calloc(num_v, sizeof(uint8_t)); vector<uint8_t> used_bp_roots(num_v, 0); vector<idi> active_queue(num_v); idi end_active_queue = 0; vector<idi> candidate_queue(num_v); idi end_candidate_queue = 0; vector<ShortIndex> short_index(num_v); vector<vector<smalli> > dist_matrix(BATCH_SIZE, vector<smalli>(num_v, SMALLI_MAX)); // uint8_t *got_candidates = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // need raw integer type to do CAS. // uint8_t *is_active = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // need raw integer type to do CAS. vector<uint8_t> got_candidates(num_v, 0); vector<uint8_t> is_active(num_v, 0); vector<idi> once_candidated_queue( num_v); // The vertex who got some candidates in this batch is in the once_candidated_queue. idi end_once_candidated_queue = 0; // uint8_t *once_candidated = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // need raw integer type to do CAS. vector<uint8_t> once_candidated(num_v, 0); // initializing_time += WallTimer::get_time_mark(); double time_labeling = -WallTimer::get_time_mark(); //double bp_labeling_time = -WallTimer::get_time_mark(); // printf("BP labeling...\n"); //test bit_parallel_labeling( G, L, used_bp_roots); //bp_labeling_time += WallTimer::get_time_mark(); for (idi b_i = 0; b_i < b_i_bound; ++b_i) { // printf("b_i: %u\n", b_i);//test batch_process( G, b_i, b_i * BATCH_SIZE, BATCH_SIZE, L, used_bp_roots, active_queue, end_active_queue, candidate_queue, end_candidate_queue, short_index, dist_matrix, got_candidates, is_active, once_candidated_queue, end_once_candidated_queue, once_candidated); // batch_process( // G, // b_i, // b_i * BATCH_SIZE, // BATCH_SIZE, // L, // used_bp_roots); } if (remainer != 0) { // printf("b_i: %u the last batch\n", b_i_bound);//test batch_process( G, b_i_bound, b_i_bound * BATCH_SIZE, remainer, L, used_bp_roots, active_queue, end_active_queue, candidate_queue, end_candidate_queue, short_index, dist_matrix, got_candidates, is_active, once_candidated_queue, end_once_candidated_queue, once_candidated); // batch_process( // G, // b_i_bound, // b_i_bound * BATCH_SIZE, // remainer, // L, // used_bp_roots); } time_labeling += WallTimer::get_time_mark(); // free(got_candidates); // free(is_active); // free(once_candidated); // free(used_bp_roots); // Test printf("Threads: %u Batch_size: %u\n", NUM_THREADS, BATCH_SIZE); //printf("BP_labeling: %.2f %.2f%%\n", bp_labeling_time, bp_labeling_time / time_labeling * 100); printf("BP_Roots_Size: %u\n", BITPARALLEL_SIZE); // printf("Initializing: %.2f %.2f%%\n", initializing_time, initializing_time / time_labeling * 100); // printf("\tinit_start_reset_time: %f (%f%%)\n", init_start_reset_time, init_start_reset_time / initializing_time * 100); // printf("\tinit_index_time: %f (%f%%)\n", init_index_time, init_index_time / initializing_time * 100); // printf("\t\tinit_indicators_time: %f (%f%%)\n", init_indicators_time, init_indicators_time / init_index_time * 100); // printf("\tinit_dist_matrix_time: %f (%f%%)\n", init_dist_matrix_time, init_dist_matrix_time / initializing_time * 100); // printf("Candidating: %.2f %.2f%%\n", candidating_time, candidating_time / time_labeling * 100); // printf("Adding: %.2f %.2f%%\n", adding_time, adding_time / time_labeling * 100); // printf("\tdistance_query_time: %f (%f%%)\n", distance_query_time, distance_query_time / adding_time * 100); // printf("\ttotal_check_count: %llu\n", total_check_count); // printf("\tbp_hit_count (to total_check): %llu (%f%%)\n", // bp_hit_count, // bp_hit_count * 
100.0 / total_check_count); // printf("\tnormal_hit_count (to total_check, to normal_check): %llu (%f%%, %f%%)\n", // normal_hit_count, // normal_hit_count * 100.0 / total_check_count, // normal_hit_count * 100.0 / (total_check_count - bp_hit_count)); #ifdef PROFILE uint64_t total_thds_adding_count = 0; double total_thds_adding_time = 0; for (inti tid = 0; tid < NUM_THREADS; ++tid) { total_thds_adding_count += thds_adding_count[tid]; total_thds_adding_time += thds_adding_time[tid]; } printf("Threads_adding_count:"); for (inti tid = 0; tid < NUM_THREADS; ++tid) { printf(" %lu(%.2f%%)", thds_adding_count[tid], thds_adding_count[tid] * 100.0 / total_thds_adding_count); } puts(""); printf("Threads_adding_time:"); for (inti tid = 0; tid < NUM_THREADS; ++tid) { printf(" %f(%.2f%%)", thds_adding_time[tid], thds_adding_time[tid] * 100.0 / total_thds_adding_time); } puts(""); //printf("Threads_adding_average_time:"); //for (inti tid = 0; tid < NUM_THREADS; ++tid) { // printf(" %f", thds_adding_time[tid] / thds_adding_count[tid]); //} puts(""); cache_miss.print(); #endif printf("Total_labeling_time: %.2f seconds\n", time_labeling); // End test } // Function to get the prefix sum of elements in offsets template<inti BATCH_SIZE> inline idi ParaVertexCentricPLL<BATCH_SIZE>::prefix_sum_for_offsets( vector<idi> &offsets) { idi size_offsets = offsets.size(); if (1 == size_offsets) { idi tmp = offsets[0]; offsets[0] = 0; return tmp; } else if (size_offsets < 2048) { idi offset_sum = 0; idi size = size_offsets; for (idi i = 0; i < size; ++i) { idi tmp = offsets[i]; offsets[i] = offset_sum; offset_sum += tmp; } return offset_sum; } else { // Parallel Prefix Sum, based on Guy E. Blelloch's Prefix Sums and Their Applications idi last_element = offsets[size_offsets - 1]; // idi size = 1 << ((idi) log2(size_offsets - 1) + 1); idi size = 1 << ((idi) log2(size_offsets)); // vector<idi> nodes(size, 0); idi tmp_element = offsets[size - 1]; //#pragma omp parallel for // for (idi i = 0; i < size_offsets; ++i) { // nodes[i] = offsets[i]; // } // Up-Sweep (Reduce) Phase idi log2size = log2(size); for (idi d = 0; d < log2size; ++d) { idi by = 1 << (d + 1); #pragma omp parallel for for (idi k = 0; k < size; k += by) { offsets[k + (1 << (d + 1)) - 1] += offsets[k + (1 << d) - 1]; } } // Down-Sweep Phase offsets[size - 1] = 0; for (idi d = log2(size) - 1; d != (idi) -1; --d) { idi by = 1 << (d + 1); #pragma omp parallel for for (idi k = 0; k < size; k += by) { idi t = offsets[k + (1 << d) - 1]; offsets[k + (1 << d) - 1] = offsets[k + (1 << (d + 1)) - 1]; offsets[k + (1 << (d + 1)) - 1] += t; } } //#pragma omp parallel for // for (idi i = 0; i < size_offsets; ++i) { // offsets[i] = nodes[i]; // } if (size != size_offsets) { idi tmp_sum = offsets[size - 1] + tmp_element; for (idi i = size; i < size_offsets; ++i) { idi t = offsets[i]; offsets[i] = tmp_sum; tmp_sum += t; } } return offsets[size_offsets - 1] + last_element; } // // Get the offset as the prefix sum of out degrees // idi offset_sum = 0; // idi size = offsets.size(); // for (idi i = 0; i < size; ++i) { // idi tmp = offsets[i]; // offsets[i] = offset_sum; // offset_sum += tmp; // } // return offset_sum; //// Parallel Prefix Sum, based on Guy E. 
Blelloch's Prefix Sums and Their Applications // idi size_offsets = offsets.size(); // idi last_element = offsets[size_offsets - 1]; //// idi size = 1 << ((idi) log2(size_offsets - 1) + 1); // idi size = 1 << ((idi) log2(size_offsets)); //// vector<idi> nodes(size, 0); // idi tmp_element = offsets[size - 1]; ////#pragma omp parallel for //// for (idi i = 0; i < size_offsets; ++i) { //// nodes[i] = offsets[i]; //// } // // // Up-Sweep (Reduce) Phase // idi log2size = log2(size); // for (idi d = 0; d < log2size; ++d) { // idi by = 1 << (d + 1); //#pragma omp parallel for // for (idi k = 0; k < size; k += by) { // offsets[k + (1 << (d + 1)) - 1] += offsets[k + (1 << d) - 1]; // } // } // // // Down-Sweep Phase // offsets[size - 1] = 0; // for (idi d = log2(size) - 1; d != (idi) -1 ; --d) { // idi by = 1 << (d + 1); //#pragma omp parallel for // for (idi k = 0; k < size; k += by) { // idi t = offsets[k + (1 << d) - 1]; // offsets[k + (1 << d) - 1] = offsets[k + (1 << (d + 1)) - 1]; // offsets[k + (1 << (d + 1)) - 1] += t; // } // } // ////#pragma omp parallel for //// for (idi i = 0; i < size_offsets; ++i) { //// offsets[i] = nodes[i]; //// } // if (size != offsets.size()) { // idi tmp_sum = offsets[size - 1] + tmp_element; // idi i_bound = offsets.size(); // for (idi i = size; i < i_bound; ++i) { // idi t = offsets[i]; // offsets[i] = tmp_sum; // tmp_sum += t; // } // } // // return offsets[size_offsets - 1] + last_element; } // Collect elements in the tmp_queue into the queue template<inti BATCH_SIZE> template<typename T> inline void ParaVertexCentricPLL<BATCH_SIZE>::collect_into_queue( // vector<idi> &tmp_queue, vector<T> &tmp_queue, vector<idi> &offsets_tmp_queue, // the locations in tmp_queue for writing from tmp_queue vector<idi> &offsets_queue, // the locations in queue for writing into queue. idi num_elements, // total number of elements which need to be added from tmp_queue to queue // vector<idi> &queue, vector<T> &queue, idi &end_queue) { if (0 == num_elements) { return; } idi i_bound = offsets_tmp_queue.size(); #pragma omp parallel for for (idi i = 0; i < i_bound; ++i) { idi i_q_start = end_queue + offsets_queue[i]; idi i_q_bound; if (i_bound - 1 != i) { i_q_bound = end_queue + offsets_queue[i + 1]; } else { i_q_bound = end_queue + num_elements; } if (i_q_start == i_q_bound) { // If the group has no elements to be added, then continue to the next group continue; } idi end_tmp = offsets_tmp_queue[i]; for (idi i_q = i_q_start; i_q < i_q_bound; ++i_q) { queue[i_q] = tmp_queue[end_tmp++]; } } end_queue += num_elements; } // Function: thread-save enqueue. The queue has enough size already. An index points the end of the queue. template<inti BATCH_SIZE> template<typename T, typename Int> inline void ParaVertexCentricPLL<BATCH_SIZE>::TS_enqueue( vector<T> &queue, Int &end_queue, const T &e) { volatile Int old_i = end_queue; volatile Int new_i = old_i + 1; while (!CAS(&end_queue, old_i, new_i)) { old_i = end_queue; new_i = old_i + 1; } queue[old_i] = e; } template<inti BATCH_SIZE> void ParaVertexCentricPLL<BATCH_SIZE>::store_index_to_file( const char *filename, const vector<idi> &rank) { // TODO: fout comment out // std::ofstream fout(filename); // if (!fout.is_open()) { // fprintf(stderr, "Error: cannot open file %s\n", filename); // exit(EXIT_FAILURE); // } // std::string txt_filename = std::string(filename) + ".txt";//test // std::ofstream txt_out(txt_filename.c_str()); // Store into file the number of vertices and the number of bit-parallel roots. 
uint64_t labels_count = 0; // fout.write((char *) &num_v_, sizeof(num_v_)); // fout.write((char *) &BITPARALLEL_SIZE, sizeof(BITPARALLEL_SIZE)); for (idi v_id = 0; v_id < num_v_; ++v_id) { idi v_rank = rank[v_id]; const IndexType &Lv = L[v_rank]; idi size_labels = Lv.vertices.size(); labels_count += size_labels; // // Store Bit-parallel Labels into file. // for (inti b_i = 0; b_i < BITPARALLEL_SIZE; ++b_i) { // weighti d = Lv.bp_dist[b_i]; // uint64_t s0 = Lv.bp_sets[b_i][0]; // uint64_t s1 = Lv.bp_sets[b_i][1]; // fout.write((char *) &d, sizeof(d)); // fout.write((char *) &s0, sizeof(s0)); // fout.write((char *) &s1, sizeof(s1)); // } vector<std::pair<idi, weighti> > ordered_labels; // Traverse v_id's all existing labels for (inti b_i = 0; b_i < Lv.batches.size(); ++b_i) { idi id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE; idi dist_start_index = Lv.batches[b_i].start_index; idi dist_bound_index = dist_start_index + Lv.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { idi v_start_index = Lv.distances[dist_i].start_index; idi v_bound_index = v_start_index + Lv.distances[dist_i].size; weighti dist = Lv.distances[dist_i].dist; for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { idi tail = Lv.vertices[v_i] + id_offset; ordered_labels.push_back(std::make_pair(tail, dist)); } } } // Sort sort(ordered_labels.begin(), ordered_labels.end()); // // Store into file // fout.write((char *) &size_labels, sizeof(size_labels)); for (idi l_i = 0; l_i < size_labels; ++l_i) { idi l = ordered_labels[l_i].first; weighti d = ordered_labels[l_i].second; // fout.write((char *) &l, sizeof(l)); // fout.write((char *) &d, sizeof(d)); // {//test // txt_out << v_id << " " << v_rank << ": " << l << " " << (idi) d << std::endl; // } } } printf("Label_size: %'lu mean: %f\n", labels_count, static_cast<double>(labels_count) / num_v_); // fout.close(); } template<inti BATCH_SIZE> void ParaVertexCentricPLL<BATCH_SIZE>::load_index_from_file( const char *filename) { std::ifstream fin(filename); if (!fin.is_open()) { fprintf(stderr, "Error: cannot open file %s\n", filename); exit(EXIT_FAILURE); } idi num_v; // Load from file the number of vertices and the number of bit-parallel roots. fin.read((char *) &num_v, sizeof(num_v)); fin.read((char *) &BITPARALLEL_SIZE, sizeof(BITPARALLEL_SIZE)); num_v_ = num_v; Index.resize(num_v); uint64_t labels_count = 0; // Load labels for every vertex for (idi v_id = 0; v_id < num_v; ++v_id) { IndexOrdered &Iv = Index[v_id]; // Load Bit-parallel Labels from file. for (inti b_i = 0; b_i < BITPARALLEL_SIZE; ++b_i) { fin.read((char *) &Iv.bp_dist[b_i], sizeof(Iv.bp_dist[b_i])); fin.read((char *) &Iv.bp_sets[b_i][0], sizeof(Iv.bp_sets[b_i][0])); fin.read((char *) &Iv.bp_sets[b_i][1], sizeof(Iv.bp_sets[b_i][1])); } // Normal Labels // Load Labels from file. 
idi size_labels; fin.read((char *) &size_labels, sizeof(size_labels)); labels_count += size_labels; Iv.label_id.resize(size_labels + 1); Iv.label_dists.resize(size_labels + 1); for (idi l_i = 0; l_i < size_labels; ++l_i) { fin.read((char *) &Iv.label_id[l_i], sizeof(Iv.label_id[l_i])); fin.read((char *) &Iv.label_dists[l_i], sizeof(Iv.label_dists[l_i])); } Iv.label_id[size_labels] = num_v; // Sentinel Iv.label_dists[size_labels] = (weighti) -1; // Sentinel } printf("Label_size_loaded: %'lu mean: %f\n", labels_count, static_cast<double>(labels_count) / num_v); fin.close(); } template<inti BATCH_SIZE> void ParaVertexCentricPLL<BATCH_SIZE>::order_labels( const vector<idi> &rank2id, const vector<idi> &rank) { idi num_v = rank.size(); vector<vector<pair < idi, weighti> > > ordered_L(num_v); idi labels_count = 0; Index.resize(num_v); // Traverse the L, put them into Index (ordered labels) for (idi v_id = 0; v_id < num_v; ++v_id) { idi new_v = rank2id[v_id]; IndexOrdered &Iv = Index[new_v]; const IndexType &Lv = L[v_id]; auto &OLv = ordered_L[new_v]; // Bit-parallel Labels memcpy(&Iv.bp_dist, &Lv.bp_dist, BITPARALLEL_SIZE * sizeof(weighti)); for (inti b_i = 0; b_i < BITPARALLEL_SIZE; ++b_i) { memcpy(&Iv.bp_sets[b_i], &Lv.bp_sets[b_i], 2 * sizeof(uint64_t)); } // Normal Labels // Traverse v_id's all existing labels for (inti b_i = 0; b_i < Lv.batches.size(); ++b_i) { idi id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE; idi dist_start_index = Lv.batches[b_i].start_index; idi dist_bound_index = dist_start_index + Lv.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { idi v_start_index = Lv.distances[dist_i].start_index; idi v_bound_index = v_start_index + Lv.distances[dist_i].size; inti dist = Lv.distances[dist_i].dist; for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { idi tail = Lv.vertices[v_i] + id_offset; // idi new_tail = rank2id[tail]; // new_L[new_v].push_back(make_pair(new_tail, dist)); OLv.push_back(std::make_pair(tail, dist)); } } } // Sort sort(OLv.begin(), OLv.end()); // Store into Index inti size_labels = OLv.size(); labels_count += size_labels; Iv.label_id.resize(size_labels + 1); // Adding one for Sentinel Iv.label_dists.resize(size_labels + 1); // Adding one for Sentinel for (inti l_i = 0; l_i < size_labels; ++l_i) { Iv.label_id[l_i] = OLv[l_i].first; Iv.label_dists[l_i] = OLv[l_i].second; } Iv.label_id[size_labels] = num_v; // Sentinel Iv.label_dists[size_labels] = WEIGHTI_MAX; // Sentinel } printf("Label_size: %u mean: %f\n", labels_count, static_cast<double>(labels_count) / num_v); // // Test // { // puts("Asserting..."); // for (idi v_id = 0; v_id < num_v; ++v_id) { // const IndexType &Lv = L[v_id]; // const IndexOrdered &Iv = Index[rank2id[v_id]]; // // Bit-parallel Labels // for (inti b_i = 0; b_i < BITPARALLEL_SIZE; ++b_i) { // assert(Lv.bp_dist[b_i] == Iv.bp_dist[b_i]); // assert(Lv.bp_sets[b_i][0] == Iv.bp_sets[b_i][0]); // assert(Lv.bp_sets[b_i][1] == Iv.bp_sets[b_i][1]); // } // // Normal Labels // assert(Lv.vertices.size() == Iv.label_id.size()); // assert(Lv.vertices.size() == Iv.label_dists.size()); //// { //// inti bound_i = Iv.label_id.size() > 10 ? 
10 : Iv.label_id.size(); //// printf("V %u:", rank2id[v_id]); //// for (inti i = 0; i < bound_i; ++i) { //// printf(" (%u, %u)", Iv.label_id[i], Iv.label_dists[i]); //// } //// puts(""); //// } // // } // puts("Asserted."); // } } template<inti BATCH_SIZE> weighti ParaVertexCentricPLL<BATCH_SIZE>::query_distance( idi a, idi b) { idi num_v = num_v_; if (a >= num_v || b >= num_v) { return a == b ? 0 : WEIGHTI_MAX; } // // A is shorter than B // IndexOrdered &Ia = (Index[a].label_id.size() < Index[b].label_id.size()) ? Index[a] : Index[b]; // IndexOrdered &Ib = (Index[a].label_id.size() < Index[b].label_id.size()) ? Index[b] : Index[a]; // // A is longer than B // IndexOrdered &Ia = (Index[a].label_id.size() > Index[b].label_id.size()) ? Index[a] : Index[b]; // IndexOrdered &Ib = (Index[a].label_id.size() > Index[b].label_id.size()) ? Index[b] : Index[a]; IndexOrdered &Ia = Index[a]; IndexOrdered &Ib = Index[b]; // const IndexOrdered &Ia = Index[a]; // const IndexOrdered &Ib = Index[b]; inti d = WEIGHTI_MAX; _mm_prefetch(&Ia.label_id[0], _MM_HINT_T0); _mm_prefetch(&Ib.label_id[0], _MM_HINT_T0); _mm_prefetch(&Ia.label_dists[0], _MM_HINT_T0); _mm_prefetch(&Ib.label_dists[0], _MM_HINT_T0); // Bit-Parallel Labels for (int i = 0; i < BITPARALLEL_SIZE; ++i) { int td = Ia.bp_dist[i] + Ib.bp_dist[i]; if (td - 2 <= d) { td += (Ia.bp_sets[i][0] & Ib.bp_sets[i][0]) ? -2 : ((Ia.bp_sets[i][0] & Ib.bp_sets[i][1]) | (Ia.bp_sets[i][1] & Ib.bp_sets[i][0])) ? -1 : 0; if (td < d) { d = td; } } } // Normal Labels (ordered) // // Vectorizaed Version // vector<idi> &A = Ia.label_id; // vector<idi> &B = Ib.label_id; // idi len_B = B.size() - 1; //// idi len_B = B.size(); // idi bound_b_base_i = len_B - (len_B % NUM_P_INT); // idi a_i = 0; // idi b_base_i = 0; // idi len_A = A.size() - 1; //// idi len_A = A.size(); // ++length_larger_than_16.second; // if (len_B >= 16) { // ++length_larger_than_16.first; // } // while (a_i < len_A && b_base_i < bound_b_base_i) { // int a = A[a_i]; // __m512i a_v = _mm512_set1_epi32(a); // // // Packed b // __m512i b_v = _mm512_loadu_epi32(&B[b_base_i]); // @suppress("Function cannot be resolved") // __mmask16 is_equal_m = _mm512_cmpeq_epi32_mask(a_v, b_v); // if (is_equal_m) { //// if (a == num_v) { //// break; // Sentinel //// } // inti td = Ia.label_dists[a_i] + Ib.label_dists[b_base_i + (idi) (log2(is_equal_m))]; // if (td < d) { // d = td; // } // // // Advance index // if (is_equal_m & (__mmask16) 0x8000) { // ++a_i; // b_base_i += NUM_P_INT; // } else { // a_i += (a < B[b_base_i + NUM_P_INT - 1]) ? 1 : 0; // b_base_i += (B[b_base_i + NUM_P_INT - 1] < a) ? NUM_P_INT : 0; // } // } else { // // Advance index // a_i += (a < B[b_base_i + NUM_P_INT - 1]) ? 1 : 0; // b_base_i += (B[b_base_i + NUM_P_INT - 1] < a) ? NUM_P_INT : 0; // } // } // while (a_i < len_A && b_base_i < len_B) { // if (A[a_i] == B[b_base_i]) { //// if (a == num_v) { //// break; // Sentinel //// } // inti td = Ia.label_dists[a_i] + Ib.label_dists[b_base_i]; // if (td < d) { // d = td; // } // // // Advance index // ++a_i; // ++b_base_i; // } else { // // Advance index // a_i += (A[a_i] < B[b_base_i]) ? 1 : 0; // b_base_i += (B[b_base_i] < A[a_i]) ? 1 : 0; // } // } // Sequential Version for (idi i1 = 0, i2 = 0;;) { idi v1 = Ia.label_id[i1], v2 = Ib.label_id[i2]; if (v1 == v2) { if (v1 == num_v) { break; // Sentinel } inti td = Ia.label_dists[i1] + Ib.label_dists[i2]; if (td < d) { d = td; } ++i1; ++i2; } else { i1 += v1 < v2 ? 1 : 0; i2 += v1 > v2 ? 
1 : 0; } } if (d >= WEIGHTI_MAX - 2) { d = WEIGHTI_MAX; } return d; } template<inti BATCH_SIZE> void ParaVertexCentricPLL<BATCH_SIZE>::switch_labels_to_old_id( const vector<idi> &rank2id, const vector<idi> &rank) { idi label_sum = 0; idi test_label_sum = 0; // idi num_v = rank2id.size(); idi num_v = rank.size(); vector<vector<pair < idi, weighti> > > new_L(num_v); // for (idi r = 0; r < num_v; ++r) { // idi v = rank2id[r]; // const IndexType &Lr = L[r]; // IndexType &Lv = new_L[v]; // idi size = Lr.get_size(); // label_sum += size; // for (idi li = 0; li < size; ++li) { // idi l = Lr.get_label_ith_v(li); // idi new_l = rank2id[l]; // Lv.add_label_seq(new_l, Lr.get_label_ith_d(li)); // } // } // L = new_L; for (idi v_id = 0; v_id < num_v; ++v_id) { idi new_v = rank2id[v_id]; const IndexType &Lv = L[v_id]; // Traverse v_id's all existing labels for (inti b_i = 0; b_i < Lv.batches.size(); ++b_i) { idi id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE; idi dist_start_index = Lv.batches[b_i].start_index; idi dist_bound_index = dist_start_index + Lv.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { label_sum += Lv.distances[dist_i].size; idi v_start_index = Lv.distances[dist_i].start_index; idi v_bound_index = v_start_index + Lv.distances[dist_i].size; inti dist = Lv.distances[dist_i].dist; for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { idi tail = Lv.vertices[v_i] + id_offset; // idi new_tail = rank2id[tail]; // new_L[new_v].push_back(make_pair(new_tail, dist)); new_L[new_v].push_back(std::make_pair(tail, dist)); ++test_label_sum; } } } } printf("Label sum: %u %u mean: %f\n", label_sum, test_label_sum, label_sum * 1.0 / num_v); // // Try to print // for (idi v = 0; v < num_v; ++v) { // const auto &Lv = new_L[v]; // idi size = Lv.size(); // printf("Vertex %u (Size %u):", v, size); // for (idi i = 0; i < size; ++i) { // printf(" (%u, %d)", Lv[i].first, Lv[i].second); // fflush(stdout); // } // puts(""); // } // // Try query // idi u; // idi v; // while (std::cin >> u >> v) { // weighti dist = WEIGHTI_MAX; // // Bit Parallel Check // const IndexType &idx_u = L[rank[u]]; // const IndexType &idx_v = L[rank[v]]; // // for (inti i = 0; i < BITPARALLEL_SIZE; ++i) { // int td = idx_v.bp_dist[i] + idx_u.bp_dist[i]; // if (td - 2 <= dist) { // td += // (idx_v.bp_sets[i][0] & idx_u.bp_sets[i][0]) ? -2 : // ((idx_v.bp_sets[i][0] & idx_u.bp_sets[i][1]) // | (idx_v.bp_sets[i][1] & idx_u.bp_sets[i][0])) // ? -1 : 0; // if (td < dist) { // dist = td; // } // } // } // // // Normal Index Check // const auto &Lu = new_L[u]; // const auto &Lv = new_L[v]; //// unsorted_map<idi, weighti> markers; // map<idi, weighti> markers; // for (idi i = 0; i < Lu.size(); ++i) { // markers[Lu[i].first] = Lu[i].second; // } // for (idi i = 0; i < Lv.size(); ++i) { // const auto &tmp_l = markers.find(Lv[i].first); // if (tmp_l == markers.end()) { // continue; // } // int d = tmp_l->second + Lv[i].second; // if (d < dist) { // dist = d; // } // } // if (dist == 255) { // printf("2147483647\n"); // } else { // printf("%u\n", dist); // } // } } } #endif /* INCLUDES_PADO_H_ */
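/*
 * Illustrative sketch, not part of the original header: the two-phase
 * "count, prefix-sum, then scatter" pattern that prefix_sum_for_offsets()
 * and collect_into_queue() implement above, shown in isolation on a plain
 * array. Every name below (collect_even_numbers, counts, queue) is
 * hypothetical; the point is only that each thread first counts its
 * contribution, an exclusive prefix sum over the per-thread counts gives a
 * private write offset, and each thread then writes into its own disjoint
 * slice of the shared queue with no locking.
 */
#include <omp.h>
#include <stdlib.h>

/* Gather the even values of src[0..n) into queue[]; returns the new length. */
static size_t collect_even_numbers(const int *src, size_t n, int *queue)
{
    int nthreads = omp_get_max_threads();
    size_t *counts = (size_t *) calloc((size_t) nthreads + 1, sizeof(size_t));
    if (counts == NULL) { return 0; }

    /* Phase 1: each thread counts the matches in its static slice. */
    #pragma omp parallel num_threads(nthreads)
    {
        int tid = omp_get_thread_num();
        size_t begin = n * (size_t) tid / (size_t) nthreads;
        size_t end   = n * (size_t) (tid + 1) / (size_t) nthreads;
        size_t local = 0;
        for (size_t i = begin; i < end; ++i) {
            if (src[i] % 2 == 0) { ++local; }
        }
        counts[tid + 1] = local;
    }

    /* Exclusive prefix sum over per-thread counts (nthreads is tiny, so serial). */
    for (int t = 0; t < nthreads; ++t) {
        counts[t + 1] += counts[t];
    }

    /* Phase 2: each thread re-scans its slice and writes into its own range. */
    #pragma omp parallel num_threads(nthreads)
    {
        int tid = omp_get_thread_num();
        size_t begin = n * (size_t) tid / (size_t) nthreads;
        size_t end   = n * (size_t) (tid + 1) / (size_t) nthreads;
        size_t pos = counts[tid];
        for (size_t i = begin; i < end; ++i) {
            if (src[i] % 2 == 0) { queue[pos++] = src[i]; }
        }
    }

    size_t total = counts[nthreads];
    free(counts);
    return total;
}
/* The labeling code above follows the same shape, except that the per-thread
 * counts are the sizes_tmp_active_queue entries and the scatter copies from
 * tmp_active_queue into active_queue. */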
kernel_cpu.c
// #ifdef __cplusplus // extern "C" { // #endif //========================================================================================================================================================================================================200 // DEFINE/INCLUDE //========================================================================================================================================================================================================200 //======================================================================================================================================================150 // LIBRARIES //======================================================================================================================================================150 #ifdef _OPENMP #include <omp.h> #endif // (in directory known to compiler) needed by openmp #include <stdio.h> // (in directory known to compiler) needed by printf, stderr #include <stdlib.h> // (in directory known to compiler) needed by malloc //======================================================================================================================================================150 // COMMON //======================================================================================================================================================150 #include "../common.h" // (in directory provided here) //======================================================================================================================================================150 // UTILITIES //======================================================================================================================================================150 #include "../util/timer/timer.h" // (in directory provided here) //========================================================================================================================================================================================================200 // KERNEL_CPU FUNCTION //========================================================================================================================================================================================================200 void kernel_gpu(int cores_arg, record *records, knode *knodes, long knodes_elem, long records_elem, int order, long maxheight, int count, long *currKnode, long *offset, int *keys, record *ans) { //======================================================================================================================================================150 // MCPU SETUP //======================================================================================================================================================150 int max_nthreads; #ifdef _OPENMP max_nthreads = omp_get_max_threads(); // printf("max # of threads = %d\n", max_nthreads); omp_set_num_threads(cores_arg); // printf("set # of threads = %d\n", cores_arg); #endif int threadsPerBlock; threadsPerBlock = order < 1024 ? 
order : 1024; //======================================================================================================================================================150 // PROCESS INTERACTIONS //======================================================================================================================================================150 // private thread IDs int thid; int bid; int i; int x = 100; int *A; A = (int *)malloc(sizeof(int) * x); // process number of querries #pragma omp target map( \ to : keys[ : count], \ knodes[ : knodes_elem], records[ : records_elem]) \ map(tofrom : offset[ : count], \ ans[ : count], currKnode[ : count]) \ device(DEVICE_ID) { #pragma omp parallel for for (bid = 0; bid < count; bid++) { // process levels of the tree for (i = 0; i < maxheight; i++) { // process all leaves at each level for (thid = 0; thid < threadsPerBlock; thid++) { // if value is between the two keys if ((knodes[currKnode[bid]].keys[thid]) <= keys[bid] && (knodes[currKnode[bid]].keys[thid + 1] > keys[bid])) { // this conditional statement is inserted to avoid crush due to but // in original code // "offset[bid]" calculated below that addresses knodes[] in the // next iteration goes outside of its bounds cause segmentation // fault // more specifically, values saved into knodes->indices in the main // function are out of bounds of knodes that they address if (knodes[offset[bid]].indices[thid] < knodes_elem) { offset[bid] = knodes[offset[bid]].indices[thid]; } } } // set for next tree level currKnode[bid] = offset[bid]; } // At this point, we have a candidate leaf node which may contain // the target record. Check each key to hopefully find the record // process all leaves at each level for (thid = 0; thid < threadsPerBlock; thid++) { if (knodes[currKnode[bid]].keys[thid] == keys[bid]) { ans[bid].value = records[knodes[currKnode[bid]].indices[thid]].value; } } } } } void kernel_cpu(int cores_arg, record *records, knode *knodes, long knodes_elem, long records_elem, int order, long maxheight, int count, long *currKnode, long *offset, int *keys, record *ans) { //======================================================================================================================================================150 // MCPU SETUP //======================================================================================================================================================150 int max_nthreads; #ifdef _OPENMP max_nthreads = omp_get_max_threads(); // printf("max # of threads = %d\n", max_nthreads); omp_set_num_threads(cores_arg); // printf("set # of threads = %d\n", cores_arg); #endif int threadsPerBlock; threadsPerBlock = order < 1024 ? 
order : 1024; //======================================================================================================================================================150 // PROCESS INTERACTIONS //======================================================================================================================================================150 // private thread IDs int thid; int bid; int i; int x = 100; int *A; A = (int *)malloc(sizeof(int) * x); // process number of querries for (bid = 0; bid < count; bid++) { // process levels of the tree for (i = 0; i < maxheight; i++) { // process all leaves at each level for (thid = 0; thid < threadsPerBlock; thid++) { // if value is between the two keys if ((knodes[currKnode[bid]].keys[thid]) <= keys[bid] && (knodes[currKnode[bid]].keys[thid + 1] > keys[bid])) { // this conditional statement is inserted to avoid crush due to but in // original code // "offset[bid]" calculated below that addresses knodes[] in the next // iteration goes outside of its bounds cause segmentation fault // more specifically, values saved into knodes->indices in the main // function are out of bounds of knodes that they address if (knodes[offset[bid]].indices[thid] < knodes_elem) { offset[bid] = knodes[offset[bid]].indices[thid]; } } } // set for next tree level currKnode[bid] = offset[bid]; } // At this point, we have a candidate leaf node which may contain // the target record. Check each key to hopefully find the record // process all leaves at each level for (thid = 0; thid < threadsPerBlock; thid++) { if (knodes[currKnode[bid]].keys[thid] == keys[bid]) { ans[bid].value = records[knodes[currKnode[bid]].indices[thid]].value; } } } }
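/*
 * Illustrative sketch, not from the benchmark: the per-query work of
 * kernel_cpu() above, reduced to a single lookup on a simplified node type.
 * The real knode layout lives in ../common.h; sketch_knode, SKETCH_ORDER and
 * sketch_lookup() below are made-up names, and the inner scan breaks out
 * early where the original keeps scanning every slot.
 */
#define SKETCH_ORDER 4                 /* fan-out of the illustrative tree */

typedef struct {
    int  keys[SKETCH_ORDER + 1];       /* sorted separator keys        */
    long indices[SKETCH_ORDER];        /* child node ids or record ids */
} sketch_knode;

/* Walk `height` levels down from `root`, then return the record id stored
 * for `key` in the reached leaf, or -1 if the key is absent. */
static long sketch_lookup(const sketch_knode *nodes, long nnodes,
                          long root, long height, int key)
{
    long curr = root;
    for (long level = 0; level < height; ++level) {
        long next = curr;
        for (int k = 0; k < SKETCH_ORDER; ++k) {
            /* Descend into the child whose range [keys[k], keys[k+1]) holds `key`. */
            if (nodes[curr].keys[k] <= key && key < nodes[curr].keys[k + 1]) {
                if (nodes[curr].indices[k] < nnodes) {  /* same bounds guard as the benchmark */
                    next = nodes[curr].indices[k];
                }
                break;
            }
        }
        curr = next;
    }
    /* In the leaf, look for an exact match. */
    for (int k = 0; k < SKETCH_ORDER; ++k) {
        if (nodes[curr].keys[k] == key) {
            return nodes[curr].indices[k];
        }
    }
    return -1;
}
/* kernel_cpu() runs this walk once per element of keys[] (the bid loop); the
 * OpenMP target/parallel version simply distributes those independent
 * queries across threads, since no query writes state read by another. */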
thd_info.c
/****************************************************************************** * INCLUDES *****************************************************************************/ #include "thd_info.h" /****************************************************************************** * PRIVATE FUNCTIONS *****************************************************************************/ /** * @brief Perform a parallel SUM reduction. * * @param thds The thread structure we are using in the reduction. * @param scratchid Which scratch array to reduce. * @param nelems How many elements in the scratch array. */ static inline void p_reduce_sum( thd_info * const thds, idx_t const scratchid, idx_t const nelems) { int const tid = splatt_omp_get_thread_num(); int const nthreads = splatt_omp_get_num_threads(); val_t * const myvals = (val_t *) thds[tid].scratch[scratchid]; int half = nthreads / 2; while(half > 0) { if(tid < half && tid + half < nthreads) { val_t const * const target = (val_t *) thds[tid+half].scratch[scratchid]; for(idx_t i=0; i < nelems; ++i) { myvals[i] += target[i]; } } #pragma omp barrier /* check for odd number */ #pragma omp master if(half > 1 && half % 2 == 1) { val_t const * const last = (val_t *) thds[half-1].scratch[scratchid]; for(idx_t i=0; i < nelems; ++i) { myvals[i] += last[i]; } } /* next iteration */ half /= 2; } /* account for odd thread at end */ #pragma omp master { if(nthreads % 2 == 1) { val_t const * const last = (val_t *) thds[nthreads-1].scratch[scratchid]; for(idx_t i=0; i < nelems; ++i) { myvals[i] += last[i]; } } } #pragma omp barrier } /** * @brief Perform a parallel MAX reduction. * * @param thds The thread structure we are using in the reduction. * @param scratchid Which scratch array to reduce. * @param nelems How many elements in the scratch array. 
*/ static inline void p_reduce_max( thd_info * const thds, idx_t const scratchid, idx_t const nelems) { int const tid = splatt_omp_get_thread_num(); int const nthreads = splatt_omp_get_num_threads(); val_t * const myvals = (val_t *) thds[tid].scratch[scratchid]; int half = nthreads / 2; while(half > 0) { if(tid < half && tid + half < nthreads) { val_t const * const target = (val_t *) thds[tid+half].scratch[scratchid]; for(idx_t i=0; i < nelems; ++i) { myvals[i] = SS_MAX(myvals[i], target[i]); } } #pragma omp barrier /* check for odd number */ #pragma omp master if(half > 1 && half % 2 == 1) { val_t const * const last = (val_t *) thds[half-1].scratch[scratchid]; for(idx_t i=0; i < nelems; ++i) { myvals[i] = SS_MAX(myvals[i], last[i]); } } /* next iteration */ half /= 2; } /* account for odd thread at end */ #pragma omp master { if(nthreads % 2 == 1) { val_t const * const last = (val_t *) thds[nthreads-1].scratch[scratchid]; for(idx_t i=0; i < nelems; ++i) { myvals[i] = SS_MAX(myvals[i], last[i]); } } } #pragma omp barrier } /****************************************************************************** * PUBLIC FUNCTIONS *****************************************************************************/ void thd_reduce( thd_info * const thds, idx_t const scratchid, idx_t const nelems, splatt_reduce_type const which) { if(splatt_omp_get_num_threads() == 1) { return; } /* just to be safe in case any thread data is being copied */ #pragma omp barrier switch(which) { case REDUCE_SUM: p_reduce_sum(thds, scratchid, nelems); break; case REDUCE_MAX: p_reduce_max(thds, scratchid, nelems); break; default: fprintf(stderr, "SPLATT: thd_reduce supports SUM and MAX only.\n"); abort(); } } thd_info * thd_init( idx_t const nthreads, idx_t const nscratch, ...) { thd_info * thds = (thd_info *) splatt_malloc(nthreads * sizeof(thd_info)); for(idx_t t=0; t < nthreads; ++t) { timer_reset(&thds[t].ttime); thds[t].nscratch = nscratch; thds[t].scratch = (void **) splatt_malloc(nscratch * sizeof(void*)); } va_list args; va_start(args, nscratch); for(idx_t s=0; s < nscratch; ++s) { idx_t const bytes = va_arg(args, idx_t); for(idx_t t=0; t < nthreads; ++t) { thds[t].scratch[s] = (void *) splatt_malloc(bytes); memset(thds[t].scratch[s], 0, bytes); } } va_end(args); return thds; } void thd_times( thd_info * thds, idx_t const nthreads) { for(idx_t t=0; t < nthreads; ++t) { printf(" thd: %"SPLATT_PF_IDX" %0.3fs\n", t, thds[t].ttime.seconds); } } void thd_reset( thd_info * thds, idx_t const nthreads) { for(idx_t t=0; t < nthreads; ++t) { timer_reset(&thds[t].ttime); } } void thd_free( thd_info * thds, idx_t const nthreads) { for(idx_t t=0; t < nthreads; ++t) { for(idx_t s=0; s < thds[t].nscratch; ++s) { free(thds[t].scratch[s]); } free(thds[t].scratch); } free(thds); }
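/*
 * Illustrative sketch, not part of SPLATT: the tree reduction that
 * p_reduce_sum()/p_reduce_max() perform over per-thread scratch arrays,
 * rewritten as a simpler pairwise scheme against plain OpenMP. `bufs` and
 * `tree_reduce_sum` are made-up names; the SPLATT code instead halves the
 * active range each round and patches up odd thread counts, but the idea is
 * the same: about log2(nthreads) rounds, a barrier after each round, and
 * the grand total landing in thread 0's buffer.
 */
#include <omp.h>

/* bufs[t] points to thread t's partial sums, each of length nelems.
 * Must be called by every thread of the enclosing parallel region;
 * on return, bufs[0] holds the elementwise total. */
static void tree_reduce_sum(double **bufs, int nelems)
{
    int tid = omp_get_thread_num();
    int nthreads = omp_get_num_threads();

    for (int span = 1; span < nthreads; span *= 2) {
        /* In the round with stride `span`, threads 0, 2*span, 4*span, ...
         * absorb the buffer of their partner `span` positions away. */
        if (tid % (2 * span) == 0 && tid + span < nthreads) {
            double *mine = bufs[tid];
            const double *partner = bufs[tid + span];
            for (int i = 0; i < nelems; ++i) {
                mine[i] += partner[i];
            }
        }
        #pragma omp barrier   /* partners must finish before the next round */
    }
}
/* thd_reduce() above plays the same role for thds[t].scratch[scratchid],
 * dispatching to the SUM or MAX variant per call. */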
core.c
/* Generated by Cython 0.29.21 */ /* BEGIN: Cython Metadata { "distutils": { "name": "monotonic_align.core", "sources": [ "core.pyx" ] }, "module_name": "monotonic_align.core" } END: Cython Metadata */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. #else #define CYTHON_ABI "0_29_21" #define CYTHON_HEX_VERSION 0x001D15F0 #define CYTHON_FUTURE_DIVISION 0 #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef 
CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #ifndef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) #endif #ifndef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #ifdef SIZEOF_VOID_P enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; #endif #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # 
define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include <stdint.h> #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef METH_STACKLESS #define METH_STACKLESS 0 #endif #if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 
#endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 #define PyMem_RawMalloc(n) PyMem_Malloc(n) #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) #define PyMem_RawFree(p) PyMem_Free(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) #include "pythread.h" #define Py_tss_NEEDS_INIT 0 typedef int Py_tss_t; static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { *key = PyThread_create_key(); return 0; } static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); *key = Py_tss_NEEDS_INIT; return key; } static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { PyObject_Free(key); } static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { return *key != Py_tss_NEEDS_INIT; } static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { PyThread_delete_key(*key); *key = Py_tss_NEEDS_INIT; } static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { return PyThread_set_key_value(*key, value); } static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { return PyThread_get_key_value(*key); } #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS #define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) #else #define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) #endif #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #ifndef PyObject_Unicode #define PyObject_Unicode PyObject_Str #endif #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #if PY_VERSION_HEX >= 0x030900A4 #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) #else #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) #endif #if CYTHON_ASSUME_SAFE_MACROS #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) #else #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) #endif #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_MARK_ERR_POS(f_index, lineno) \ { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } #define __PYX_ERR(f_index, lineno, Ln_error) \ { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__monotonic_align__core #define __PYX_HAVE_API__monotonic_align__core /* Early includes */ #include "pythread.h" #include <string.h> #include <stdlib.h> #include <stdio.h> #include "pystate.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { return (size_t) i < (size_t) limit; } #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) 
((value<0) ? -value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime = NULL; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= 
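/* Note (editorial): the static globals declared around this point are the per-module state the
   generated code relies on: the module object and its dict, the builtins dict, interned empty
   tuple/bytes/unicode objects, and the __pyx_lineno / __pyx_filename / __pyx_f[] globals that
   __PYX_ERR records into and that traceback construction later reads back. */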
__FILE__; static const char *__pyx_filename; static const char *__pyx_f[] = { "core.pyx", "stringsource", }; /* NoFastGil.proto */ #define __Pyx_PyGILState_Ensure PyGILState_Ensure #define __Pyx_PyGILState_Release PyGILState_Release #define __Pyx_FastGIL_Remember() #define __Pyx_FastGIL_Forget() #define __Pyx_FastGilFuncInit() /* MemviewSliceStruct.proto */ struct __pyx_memoryview_obj; typedef struct { struct __pyx_memoryview_obj *memview; char *data; Py_ssize_t shape[8]; Py_ssize_t strides[8]; Py_ssize_t suboffsets[8]; } __Pyx_memviewslice; #define __Pyx_MemoryView_Len(m) (m.shape[0]) /* Atomics.proto */ #include <pythread.h> #ifndef CYTHON_ATOMICS #define CYTHON_ATOMICS 1 #endif #define __pyx_atomic_int_type int #if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\ (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\ !defined(__i386__) #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1) #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1) #ifdef __PYX_DEBUG_ATOMICS #warning "Using GNU atomics" #endif #elif CYTHON_ATOMICS && defined(_MSC_VER) && 0 #include <Windows.h> #undef __pyx_atomic_int_type #define __pyx_atomic_int_type LONG #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #pragma message ("Using MSVC atomics") #endif #elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #warning "Using Intel atomics" #endif #else #undef CYTHON_ATOMICS #define CYTHON_ATOMICS 0 #ifdef __PYX_DEBUG_ATOMICS #warning "Not using atomics" #endif #endif typedef volatile __pyx_atomic_int_type __pyx_atomic_int; #if CYTHON_ATOMICS #define __pyx_add_acquisition_count(memview)\ __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #else #define __pyx_add_acquisition_count(memview)\ __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #endif /* ForceInitThreads.proto */ #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif /* BufferFormatStructs.proto */ #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /*--- Type declarations ---*/ struct __pyx_array_obj; struct __pyx_MemviewEnum_obj; struct __pyx_memoryview_obj; struct __pyx_memoryviewslice_obj; struct 
__pyx_opt_args_15monotonic_align_4core_maximum_path_each; /* "monotonic_align/core.pyx":7 * @cython.boundscheck(False) * @cython.wraparound(False) * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< * cdef int x * cdef int y */ struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each { int __pyx_n; float max_neg_val; }; /* "View.MemoryView":105 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_array_obj { PyObject_HEAD struct __pyx_vtabstruct_array *__pyx_vtab; char *data; Py_ssize_t len; char *format; int ndim; Py_ssize_t *_shape; Py_ssize_t *_strides; Py_ssize_t itemsize; PyObject *mode; PyObject *_format; void (*callback_free_data)(void *); int free_data; int dtype_is_object; }; /* "View.MemoryView":279 * * @cname('__pyx_MemviewEnum') * cdef class Enum(object): # <<<<<<<<<<<<<< * cdef object name * def __init__(self, name): */ struct __pyx_MemviewEnum_obj { PyObject_HEAD PyObject *name; }; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_memoryview_obj { PyObject_HEAD struct __pyx_vtabstruct_memoryview *__pyx_vtab; PyObject *obj; PyObject *_size; PyObject *_array_interface; PyThread_type_lock lock; __pyx_atomic_int acquisition_count[2]; __pyx_atomic_int *acquisition_count_aligned_p; Py_buffer view; int flags; int dtype_is_object; __Pyx_TypeInfo *typeinfo; }; /* "View.MemoryView":965 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_memoryviewslice_obj { struct __pyx_memoryview_obj __pyx_base; __Pyx_memviewslice from_slice; PyObject *from_object; PyObject *(*to_object_func)(char *); int (*to_dtype_func)(char *, PyObject *); }; /* "View.MemoryView":105 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_vtabstruct_array { PyObject *(*get_memview)(struct __pyx_array_obj *); }; static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_vtabstruct_memoryview { char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); }; static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; /* "View.MemoryView":965 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_vtabstruct__memoryviewslice { struct __pyx_vtabstruct_memoryview __pyx_base; }; static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void 
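/* Note (editorial): the struct definitions above are the C-level layouts Cython emits for this
   module: __pyx_opt_args_15monotonic_align_4core_maximum_path_each carries the optional
   max_neg_val default argument of maximum_path_each, while the array / Enum / memoryview /
   _memoryviewslice structs and their vtable structs back the View.MemoryView utility classes
   that implement the typed memoryview arguments declared in core.pyx. */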
(*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* MemviewSliceInit.proto */ #define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d #define __Pyx_MEMVIEW_DIRECT 1 #define __Pyx_MEMVIEW_PTR 2 #define __Pyx_MEMVIEW_FULL 4 #define __Pyx_MEMVIEW_CONTIG 8 #define __Pyx_MEMVIEW_STRIDED 16 #define __Pyx_MEMVIEW_FOLLOW 32 #define __Pyx_IS_C_CONTIG 1 #define __Pyx_IS_F_CONTIG 2 static int __Pyx_init_memviewslice( struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference); static CYTHON_INLINE int __pyx_add_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); #define 
__pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) #define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) #define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) #define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* None.proto */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); /* ArgTypeTest.proto */ #define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\ __Pyx__ArgTypeTest(obj, type, name, exact)) static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* 
RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #define __Pyx_BUILD_ASSERT_EXPR(cond)\ (sizeof(char [1 - 2*!(cond)]) - 1) #ifndef Py_MEMBER_SIZE #define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #endif static size_t __pyx_pyframe_localsplus_offset = 0; #include "frameobject.h" #define __Pxy_PyFrame_Initialize_Offsets()\ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) #define __Pyx_PyFrame_GetLocalsplus(frame)\ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) #endif /* PyObjectCall2Args.proto */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* IncludeStringH.proto */ #include <string.h> /* BytesEquals.proto */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /* UnicodeEquals.proto */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /* StrEquals.proto */ #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif /* None.proto */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); /* UnaryNegOverflows.proto */ #define UNARY_NEG_WOULD_OVERFLOW(x)\ (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ /* GetAttr.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* ObjectGetItem.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); #else #define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) #endif /* decode_c_string_utf16.proto */ static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 0; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = -1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } /* decode_c_string.proto */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetAttr3.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /* PyDictVersioning.proto */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS #define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) #define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ (version_var) = __PYX_GET_DICT_VERSION(dict);\ (cache_var) = (value); #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ (VAR) = __pyx_dict_cached_value;\ } else {\ (VAR) = __pyx_dict_cached_value = (LOOKUP);\ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ }\ } static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); static CYTHON_INLINE PY_UINT64_T 
__Pyx_get_object_dict_version(PyObject *obj); static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); #else #define __PYX_GET_DICT_VERSION(dict) (0) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); #endif /* GetModuleGlobalName.proto */ #if CYTHON_USE_DICT_VERSIONS #define __Pyx_GetModuleGlobalName(var, name) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } #define __Pyx_GetModuleGlobalNameUncached(var, name) {\ PY_UINT64_T __pyx_dict_version;\ PyObject *__pyx_dict_cached_value;\ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); #else #define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) #define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); #endif /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* GetTopmostException.proto */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); #endif /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* SwapException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* FastTypeChecks.proto 
*/ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ /* ListCompAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); __Pyx_SET_SIZE(list, len + 1); return 0; } return PyList_Append(list, x); } #else #define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); #else #define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif /* ListExtend.proto */ static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { #if CYTHON_COMPILING_IN_CPYTHON PyObject* none = _PyList_Extend((PyListObject*)L, v); if (unlikely(!none)) return -1; Py_DECREF(none); return 0; #else return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); #endif } /* ListAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); __Pyx_SET_SIZE(list, len + 1); return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif /* None.proto */ static CYTHON_INLINE long __Pyx_div_long(long, long); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* HasAttr.proto */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); /* PyObject_GenericGetAttrNoDict.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr #endif /* PyObject_GenericGetAttr.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr #endif /* SetVTable.proto */ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /* PyObjectGetAttrStrNoError.proto */ static CYTHON_INLINE PyObject* 
__Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); /* SetupReduce.proto */ static int __Pyx_setup_reduce(PyObject* type_obj); /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; /* MemviewSliceIsContig.proto */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); /* OverlappingSlices.proto */ static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize); /* Capsule.proto */ static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); /* IsLittleEndian.proto */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); /* BufferFormatCheck.proto */ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); /* TypeInfoCompare.proto */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); /* MemviewSliceValidateAndInit.proto */ static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *, int writable_flag); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* MemviewSliceCopyTemplate.proto */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static 
CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ /* Module declarations from 'cython.view' */ /* Module declarations from 'cython' */ /* Module declarations from 'monotonic_align.core' */ static PyTypeObject *__pyx_array_type = 0; static PyTypeObject *__pyx_MemviewEnum_type = 0; static PyTypeObject *__pyx_memoryview_type = 0; static PyTypeObject *__pyx_memoryviewslice_type = 0; static PyObject *generic = 0; static PyObject *strided = 0; static PyObject *indirect = 0; static PyObject *contiguous = 0; static PyObject *indirect_contiguous = 0; static int __pyx_memoryview_thread_locks_used; static PyThread_type_lock __pyx_memoryview_thread_locks[8]; static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice, __Pyx_memviewslice, int, int, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args); /*proto*/ static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/ static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ static void *__pyx_align_pointer(void *, size_t); /*proto*/ static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ static PyObject *_unellipsify(PyObject *, int); /*proto*/ static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); 
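/* Note (editorial): everything up to this point is declaration only. The module's own logic
   lives in __pyx_f_15monotonic_align_4core_maximum_path_each (the per-example dynamic program)
   and __pyx_f_15monotonic_align_4core_maximum_path_c, which, judging from its declared
   paths/values/t_ys/t_xs memoryview arguments, appears to apply maximum_path_each across a
   batch and is also exposed to Python; the remaining prototypes belong to the shared
   View.MemoryView support code. */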
/*proto*/ static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 
'U' : 'I', IS_UNSIGNED(int), 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 }; #define __Pyx_MODULE_NAME "monotonic_align.core" extern int __pyx_module_is_main_monotonic_align__core; int __pyx_module_is_main_monotonic_align__core = 0; /* Implementation of 'monotonic_align.core' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_enumerate; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static const char __pyx_k_O[] = "O"; static const char __pyx_k_c[] = "c"; static const char __pyx_k_id[] = "id"; static const char __pyx_k_new[] = "__new__"; static const char __pyx_k_obj[] = "obj"; static const char __pyx_k_base[] = "base"; static const char __pyx_k_dict[] = "__dict__"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_mode[] = "mode"; static const char __pyx_k_name[] = "name"; static const char __pyx_k_ndim[] = "ndim"; static const char __pyx_k_pack[] = "pack"; static const char __pyx_k_size[] = "size"; static const char __pyx_k_step[] = "step"; static const char __pyx_k_stop[] = "stop"; static const char __pyx_k_t_xs[] = "t_xs"; static const char __pyx_k_t_ys[] = "t_ys"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_ASCII[] = "ASCII"; static const char __pyx_k_class[] = "__class__"; static const char __pyx_k_error[] = "error"; static const char __pyx_k_flags[] = "flags"; static const char __pyx_k_paths[] = "paths"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_shape[] = "shape"; static const char __pyx_k_start[] = "start"; static const char __pyx_k_encode[] = "encode"; static const char __pyx_k_format[] = "format"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_name_2[] = "__name__"; static const char __pyx_k_pickle[] = "pickle"; static const char __pyx_k_reduce[] = "__reduce__"; static const char __pyx_k_struct[] = "struct"; static const char __pyx_k_unpack[] = "unpack"; static const char __pyx_k_update[] = "update"; static const char __pyx_k_values[] = "values"; static const char __pyx_k_fortran[] = "fortran"; static const char __pyx_k_memview[] = "memview"; static const char __pyx_k_Ellipsis[] = "Ellipsis"; static const char __pyx_k_getstate[] = "__getstate__"; static const char __pyx_k_itemsize[] = "itemsize"; static const char __pyx_k_pyx_type[] = "__pyx_type"; static const char __pyx_k_setstate[] = "__setstate__"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_enumerate[] = "enumerate"; static const char __pyx_k_pyx_state[] = "__pyx_state"; static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; static const char __pyx_k_IndexError[] = "IndexError"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_pyx_result[] = "__pyx_result"; static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static const char __pyx_k_MemoryError[] = "MemoryError"; static const char __pyx_k_PickleError[] = "PickleError"; static const char __pyx_k_pyx_checksum[] = "__pyx_checksum"; static const char __pyx_k_stringsource[] = "stringsource"; static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; static const char __pyx_k_View_MemoryView[] = "View.MemoryView"; static const char 
__pyx_k_allocate_buffer[] = "allocate_buffer"; static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview"; static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview"; static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; static PyObject *__pyx_n_s_ASCII; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor; static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject *__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; static PyObject *__pyx_n_s_IndexError; static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject *__pyx_n_s_MemoryError; static PyObject 
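/* Note (editorial): the __pyx_k_* char arrays above are the raw string literals, and the
   __pyx_n_s_* / __pyx_kp_s_* PyObject pointers declared here are populated once at module
   initialisation from the string table (standard Cython behaviour via __Pyx_InitStrings,
   declared earlier), so attribute names and error-message strings are interned and reused. */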
*__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_n_b_O; static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static PyObject *__pyx_n_s_PickleError; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_View_MemoryView; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_class; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_dict; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_n_s_getstate; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_itemsize; static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_n_s_ndim; static PyObject *__pyx_n_s_new; static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; static PyObject *__pyx_n_s_obj; static PyObject *__pyx_n_s_pack; static PyObject *__pyx_n_s_paths; static PyObject *__pyx_n_s_pickle; static PyObject *__pyx_n_s_pyx_PickleError; static PyObject *__pyx_n_s_pyx_checksum; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_result; static PyObject *__pyx_n_s_pyx_state; static PyObject *__pyx_n_s_pyx_type; static PyObject *__pyx_n_s_pyx_unpickle_Enum; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_reduce; static PyObject *__pyx_n_s_reduce_cython; static PyObject *__pyx_n_s_reduce_ex; static PyObject *__pyx_n_s_setstate; static PyObject *__pyx_n_s_setstate_cython; static PyObject *__pyx_n_s_shape; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_kp_s_stringsource; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_t_xs; static PyObject *__pyx_n_s_t_ys; static PyObject *__pyx_n_s_test; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_n_s_unpack; static PyObject *__pyx_n_s_update; static PyObject *__pyx_n_s_values; static PyObject *__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int 
__pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj 
*__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_184977713; static PyObject *__pyx_int_neg_1; static float __pyx_k_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_slice__16; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject 
*__pyx_tuple__13; static PyObject *__pyx_tuple__14; static PyObject *__pyx_tuple__15; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__18; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__20; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__22; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__24; static PyObject *__pyx_tuple__25; static PyObject *__pyx_codeobj__26; /* Late includes */ /* "monotonic_align/core.pyx":7 * @cython.boundscheck(False) * @cython.wraparound(False) * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< * cdef int x * cdef int y */ static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice __pyx_v_path, __Pyx_memviewslice __pyx_v_value, int __pyx_v_t_y, int __pyx_v_t_x, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args) { float __pyx_v_max_neg_val = __pyx_k_; int __pyx_v_x; int __pyx_v_y; float __pyx_v_v_prev; float __pyx_v_v_cur; int __pyx_v_index; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; long __pyx_t_4; int __pyx_t_5; long __pyx_t_6; long __pyx_t_7; int __pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; float __pyx_t_11; float __pyx_t_12; float __pyx_t_13; int __pyx_t_14; Py_ssize_t __pyx_t_15; Py_ssize_t __pyx_t_16; if (__pyx_optional_args) { if (__pyx_optional_args->__pyx_n > 0) { __pyx_v_max_neg_val = __pyx_optional_args->max_neg_val; } } /* "monotonic_align/core.pyx":13 * cdef float v_cur * cdef float tmp * cdef int index = t_x - 1 # <<<<<<<<<<<<<< * * for y in range(t_y): */ __pyx_v_index = (__pyx_v_t_x - 1); /* "monotonic_align/core.pyx":15 * cdef int index = t_x - 1 * * for y in range(t_y): # <<<<<<<<<<<<<< * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): * if x == y: */ __pyx_t_1 = __pyx_v_t_y; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_y = __pyx_t_3; /* "monotonic_align/core.pyx":16 * * for y in range(t_y): * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): # <<<<<<<<<<<<<< * if x == y: * v_cur = max_neg_val */ __pyx_t_4 = (__pyx_v_y + 1); __pyx_t_5 = __pyx_v_t_x; if (((__pyx_t_4 < __pyx_t_5) != 0)) { __pyx_t_6 = __pyx_t_4; } else { __pyx_t_6 = __pyx_t_5; } __pyx_t_4 = __pyx_t_6; __pyx_t_5 = ((__pyx_v_t_x + __pyx_v_y) - __pyx_v_t_y); __pyx_t_6 = 0; if (((__pyx_t_5 > __pyx_t_6) != 0)) { __pyx_t_7 = __pyx_t_5; } else { __pyx_t_7 = __pyx_t_6; } __pyx_t_6 = __pyx_t_4; for (__pyx_t_5 = __pyx_t_7; __pyx_t_5 < __pyx_t_6; __pyx_t_5+=1) { __pyx_v_x = __pyx_t_5; /* "monotonic_align/core.pyx":17 * for y in range(t_y): * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): * if x == y: # <<<<<<<<<<<<<< * v_cur = max_neg_val * else: */ __pyx_t_8 = ((__pyx_v_x == __pyx_v_y) != 0); if (__pyx_t_8) { /* "monotonic_align/core.pyx":18 * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): * if x == y: * v_cur = max_neg_val # <<<<<<<<<<<<<< * else: * v_cur = value[y-1, x] */ __pyx_v_v_cur = __pyx_v_max_neg_val; /* "monotonic_align/core.pyx":17 * for y in range(t_y): * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): * if x == y: # <<<<<<<<<<<<<< * v_cur = max_neg_val * else: */ goto __pyx_L7; } /* "monotonic_align/core.pyx":20 * v_cur = max_neg_val * else: * v_cur = value[y-1, x] # <<<<<<<<<<<<<< * if x == 0: * if y == 0: */ /*else*/ { __pyx_t_9 = (__pyx_v_y - 1); __pyx_t_10 = __pyx_v_x; __pyx_v_v_cur = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * 
__pyx_v_value.strides[0]) )) + __pyx_t_10)) ))); } __pyx_L7:; /* "monotonic_align/core.pyx":21 * else: * v_cur = value[y-1, x] * if x == 0: # <<<<<<<<<<<<<< * if y == 0: * v_prev = 0. */ __pyx_t_8 = ((__pyx_v_x == 0) != 0); if (__pyx_t_8) { /* "monotonic_align/core.pyx":22 * v_cur = value[y-1, x] * if x == 0: * if y == 0: # <<<<<<<<<<<<<< * v_prev = 0. * else: */ __pyx_t_8 = ((__pyx_v_y == 0) != 0); if (__pyx_t_8) { /* "monotonic_align/core.pyx":23 * if x == 0: * if y == 0: * v_prev = 0. # <<<<<<<<<<<<<< * else: * v_prev = max_neg_val */ __pyx_v_v_prev = 0.; /* "monotonic_align/core.pyx":22 * v_cur = value[y-1, x] * if x == 0: * if y == 0: # <<<<<<<<<<<<<< * v_prev = 0. * else: */ goto __pyx_L9; } /* "monotonic_align/core.pyx":25 * v_prev = 0. * else: * v_prev = max_neg_val # <<<<<<<<<<<<<< * else: * v_prev = value[y-1, x-1] */ /*else*/ { __pyx_v_v_prev = __pyx_v_max_neg_val; } __pyx_L9:; /* "monotonic_align/core.pyx":21 * else: * v_cur = value[y-1, x] * if x == 0: # <<<<<<<<<<<<<< * if y == 0: * v_prev = 0. */ goto __pyx_L8; } /* "monotonic_align/core.pyx":27 * v_prev = max_neg_val * else: * v_prev = value[y-1, x-1] # <<<<<<<<<<<<<< * value[y, x] += max(v_prev, v_cur) * */ /*else*/ { __pyx_t_10 = (__pyx_v_y - 1); __pyx_t_9 = (__pyx_v_x - 1); __pyx_v_v_prev = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_10 * __pyx_v_value.strides[0]) )) + __pyx_t_9)) ))); } __pyx_L8:; /* "monotonic_align/core.pyx":28 * else: * v_prev = value[y-1, x-1] * value[y, x] += max(v_prev, v_cur) # <<<<<<<<<<<<<< * * for y in range(t_y - 1, -1, -1): */ __pyx_t_11 = __pyx_v_v_cur; __pyx_t_12 = __pyx_v_v_prev; if (((__pyx_t_11 > __pyx_t_12) != 0)) { __pyx_t_13 = __pyx_t_11; } else { __pyx_t_13 = __pyx_t_12; } __pyx_t_9 = __pyx_v_y; __pyx_t_10 = __pyx_v_x; *((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) )) += __pyx_t_13; } } /* "monotonic_align/core.pyx":30 * value[y, x] += max(v_prev, v_cur) * * for y in range(t_y - 1, -1, -1): # <<<<<<<<<<<<<< * path[y, index] = 1 * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): */ for (__pyx_t_1 = (__pyx_v_t_y - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_y = __pyx_t_1; /* "monotonic_align/core.pyx":31 * * for y in range(t_y - 1, -1, -1): * path[y, index] = 1 # <<<<<<<<<<<<<< * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): * index = index - 1 */ __pyx_t_10 = __pyx_v_y; __pyx_t_9 = __pyx_v_index; *((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_path.data + __pyx_t_10 * __pyx_v_path.strides[0]) )) + __pyx_t_9)) )) = 1; /* "monotonic_align/core.pyx":32 * for y in range(t_y - 1, -1, -1): * path[y, index] = 1 * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<< * index = index - 1 * */ __pyx_t_14 = ((__pyx_v_index != 0) != 0); if (__pyx_t_14) { } else { __pyx_t_8 = __pyx_t_14; goto __pyx_L13_bool_binop_done; } __pyx_t_14 = ((__pyx_v_index == __pyx_v_y) != 0); if (!__pyx_t_14) { } else { __pyx_t_8 = __pyx_t_14; goto __pyx_L13_bool_binop_done; } __pyx_t_9 = (__pyx_v_y - 1); __pyx_t_10 = __pyx_v_index; __pyx_t_15 = (__pyx_v_y - 1); __pyx_t_16 = (__pyx_v_index - 1); __pyx_t_14 = (((*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) ))) < (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_15 * 
__pyx_v_value.strides[0]) )) + __pyx_t_16)) )))) != 0); __pyx_t_8 = __pyx_t_14; __pyx_L13_bool_binop_done:; if (__pyx_t_8) { /* "monotonic_align/core.pyx":33 * path[y, index] = 1 * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): * index = index - 1 # <<<<<<<<<<<<<< * * */ __pyx_v_index = (__pyx_v_index - 1); /* "monotonic_align/core.pyx":32 * for y in range(t_y - 1, -1, -1): * path[y, index] = 1 * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<< * index = index - 1 * */ } } /* "monotonic_align/core.pyx":7 * @cython.boundscheck(False) * @cython.wraparound(False) * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< * cdef int x * cdef int y */ /* function exit code */ } /* "monotonic_align/core.pyx":38 * @cython.boundscheck(False) * @cython.wraparound(False) * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<< * cdef int b = paths.shape[0] * cdef int i */ static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs, CYTHON_UNUSED int __pyx_skip_dispatch) { CYTHON_UNUSED int __pyx_v_b; int __pyx_v_i; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; __Pyx_memviewslice __pyx_t_4 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } }; Py_ssize_t __pyx_t_6; Py_ssize_t __pyx_t_7; /* "monotonic_align/core.pyx":39 * @cython.wraparound(False) * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: * cdef int b = paths.shape[0] # <<<<<<<<<<<<<< * cdef int i * for i in prange(b, nogil=True): */ __pyx_v_b = (__pyx_v_paths.shape[0]); /* "monotonic_align/core.pyx":41 * cdef int b = paths.shape[0] * cdef int i * for i in prange(b, nogil=True): # <<<<<<<<<<<<<< * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { __pyx_t_1 = __pyx_v_b; if ((1 == 0)) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_3 = (__pyx_t_1 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_3 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_6, __pyx_t_7) firstprivate(__pyx_t_4, __pyx_t_5) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) #endif /* _OPENMP */ for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){ { __pyx_v_i = (int)(0 + 1 * __pyx_t_2); /* "monotonic_align/core.pyx":42 * cdef int i * for i in prange(b, nogil=True): * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) # <<<<<<<<<<<<<< */ __pyx_t_4.data = __pyx_v_paths.data; __pyx_t_4.memview = __pyx_v_paths.memview; __PYX_INC_MEMVIEW(&__pyx_t_4, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_i; Py_ssize_t __pyx_tmp_stride = __pyx_v_paths.strides[0]; __pyx_t_4.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_4.shape[0] = __pyx_v_paths.shape[1]; __pyx_t_4.strides[0] = __pyx_v_paths.strides[1]; __pyx_t_4.suboffsets[0] = -1; __pyx_t_4.shape[1] = 
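/* Note (added): this is the generated body of "for i in prange(b, nogil=True)".
   Inside the OpenMP-parallelised loop, Cython builds the 2-D slices paths[i]
   (__pyx_t_4) and values[i] (__pyx_t_5) by hand: it offsets the data pointer
   by i * strides[0] and copies the shape, strides and suboffsets of dimensions
   1 and 2, then passes both slices, together with t_ys[i] and t_xs[i], to
   maximum_path_each. */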
__pyx_v_paths.shape[2]; __pyx_t_4.strides[1] = __pyx_v_paths.strides[2]; __pyx_t_4.suboffsets[1] = -1; __pyx_t_5.data = __pyx_v_values.data; __pyx_t_5.memview = __pyx_v_values.memview; __PYX_INC_MEMVIEW(&__pyx_t_5, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_i; Py_ssize_t __pyx_tmp_stride = __pyx_v_values.strides[0]; __pyx_t_5.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_5.shape[0] = __pyx_v_values.shape[1]; __pyx_t_5.strides[0] = __pyx_v_values.strides[1]; __pyx_t_5.suboffsets[0] = -1; __pyx_t_5.shape[1] = __pyx_v_values.shape[2]; __pyx_t_5.strides[1] = __pyx_v_values.strides[2]; __pyx_t_5.suboffsets[1] = -1; __pyx_t_6 = __pyx_v_i; __pyx_t_7 = __pyx_v_i; __pyx_f_15monotonic_align_4core_maximum_path_each(__pyx_t_4, __pyx_t_5, (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_ys.data) + __pyx_t_6)) ))), (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_xs.data) + __pyx_t_7)) ))), NULL); __PYX_XDEC_MEMVIEW(&__pyx_t_4, 0); __pyx_t_4.memview = NULL; __pyx_t_4.data = NULL; __PYX_XDEC_MEMVIEW(&__pyx_t_5, 0); __pyx_t_5.memview = NULL; __pyx_t_5.data = NULL; } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "monotonic_align/core.pyx":41 * cdef int b = paths.shape[0] * cdef int i * for i in prange(b, nogil=True): # <<<<<<<<<<<<<< * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "monotonic_align/core.pyx":38 * @cython.boundscheck(False) * @cython.wraparound(False) * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<< * cdef int b = paths.shape[0] * cdef int i */ /* function exit code */ } /* Python wrapper */ static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_paths = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_values = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_t_ys = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_t_xs = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("maximum_path_c (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_paths,&__pyx_n_s_values,&__pyx_n_s_t_ys,&__pyx_n_s_t_xs,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_paths)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: 
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_values)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 1); __PYX_ERR(0, 38, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_ys)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 2); __PYX_ERR(0, 38, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_xs)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 3); __PYX_ERR(0, 38, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "maximum_path_c") < 0)) __PYX_ERR(0, 38, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); } __pyx_v_paths = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_paths.memview)) __PYX_ERR(0, 38, __pyx_L3_error) __pyx_v_values = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_values.memview)) __PYX_ERR(0, 38, __pyx_L3_error) __pyx_v_t_ys = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_ys.memview)) __PYX_ERR(0, 38, __pyx_L3_error) __pyx_v_t_xs = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_xs.memview)) __PYX_ERR(0, 38, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 38, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_15monotonic_align_4core_maximum_path_c(__pyx_self, __pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("maximum_path_c", 0); __Pyx_XDECREF(__pyx_r); if (unlikely(!__pyx_v_paths.memview)) { __Pyx_RaiseUnboundLocalError("paths"); __PYX_ERR(0, 38, __pyx_L1_error) } if (unlikely(!__pyx_v_values.memview)) { __Pyx_RaiseUnboundLocalError("values"); __PYX_ERR(0, 38, __pyx_L1_error) } if (unlikely(!__pyx_v_t_ys.memview)) { __Pyx_RaiseUnboundLocalError("t_ys"); __PYX_ERR(0, 38, __pyx_L1_error) } if (unlikely(!__pyx_v_t_xs.memview)) { __Pyx_RaiseUnboundLocalError("t_xs"); __PYX_ERR(0, 38, __pyx_L1_error) } __pyx_t_1 = __Pyx_void_to_None(__pyx_f_15monotonic_align_4core_maximum_path_c(__pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs, 0)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 38, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* function exit code */ 
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_paths, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_values, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_t_ys, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_t_xs, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* Python wrapper */ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_shape = 0; Py_ssize_t __pyx_v_itemsize; PyObject *__pyx_v_format = 0; PyObject *__pyx_v_mode = 0; int __pyx_v_allocate_buffer; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)__pyx_n_s_c); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_shape = ((PyObject*)values[0]); __pyx_v_itemsize = 
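/* Reference (added): a sketch of the original monotonic_align/core.pyx,
   reconstructed from the inline source comments embedded in the generated
   code above. The cimport lines, the variable declarations and the exact
   indentation are inferred, not taken from the real .pyx file.

   cimport cython
   from cython.parallel import prange

   @cython.boundscheck(False)
   @cython.wraparound(False)
   cdef void maximum_path_each(int[:,::1] path, float[:,::1] value,
                               int t_y, int t_x,
                               float max_neg_val=-1e9) nogil:
       cdef int x
       cdef int y
       cdef float v_prev
       cdef float v_cur
       cdef float tmp
       cdef int index = t_x - 1

       # forward pass: accumulate the best monotonic-alignment score in place
       for y in range(t_y):
           for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
               if x == y:
                   v_cur = max_neg_val
               else:
                   v_cur = value[y-1, x]
               if x == 0:
                   if y == 0:
                       v_prev = 0.
                   else:
                       v_prev = max_neg_val
               else:
                   v_prev = value[y-1, x-1]
               value[y, x] += max(v_prev, v_cur)

       # backward pass: trace the chosen alignment into `path`
       for y in range(t_y - 1, -1, -1):
           path[y, index] = 1
           if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]):
               index = index - 1

   @cython.boundscheck(False)
   @cython.wraparound(False)
   cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values,
                             int[::1] t_ys, int[::1] t_xs) nogil:
       cdef int b = paths.shape[0]
       cdef int i
       for i in prange(b, nogil=True):
           maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i])

   Typical call site (an assumption about the caller, not shown in this file):
   C-contiguous numpy arrays, int32 for paths / t_ys / t_xs and float32 for
   values, with paths zero-initialised before the call. */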
__Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error) __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error) } else { /* "View.MemoryView":123 * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< * * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error) if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error) } __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char __pyx_v_order; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; char *__pyx_t_7; int __pyx_t_8; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; Py_ssize_t __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":129 * cdef PyObject **p * * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 129, __pyx_L1_error) } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error) __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":130 * * self.ndim = <int> len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< * * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # 
<<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 133, __pyx_L1_error) /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ } /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 136, __pyx_L1_error) /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ } /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyBytes_Check(__pyx_v_format); __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":139 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_3 = (__pyx_t_6) ? 
__Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ } /* "View.MemoryView":140 * if not isinstance(format, bytes): * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error) __pyx_t_3 = __pyx_v_format; __Pyx_INCREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":141 * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string * self.format = self._format # <<<<<<<<<<<<<< * * */ if (unlikely(__pyx_v_self->_format == Py_None)) { PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); __PYX_ERR(1, 141, __pyx_L1_error) } __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error) __pyx_v_self->format = __pyx_t_7; /* "View.MemoryView":144 * * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); /* "View.MemoryView":145 * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 148, __pyx_L1_error) /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ } /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ __pyx_t_8 = 0; __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0; for (;;) { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_9; __pyx_v_idx = __pyx_t_8; __pyx_t_8 = (__pyx_t_8 + 1); /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":153 * for idx, dim in enumerate(shape): * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<< * self._shape[idx] = dim * */ __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6); __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 153, __pyx_L1_error) /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ } /* "View.MemoryView":154 * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim # <<<<<<<<<<<<<< * * cdef char order */ (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":158 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":159 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ goto __pyx_L10; } /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error) if (likely(__pyx_t_4)) { /* "View.MemoryView":161 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":162 * elif mode == 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ goto __pyx_L10; } /* "View.MemoryView":164 * self.mode = u'c' * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ /*else*/ { __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 164, __pyx_L1_error) } __pyx_L10:; /* "View.MemoryView":166 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":169 * itemsize, self.ndim, order) * * self.free_data = allocate_buffer # <<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":170 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< * if allocate_buffer: * */ __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 
170, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); if (__pyx_t_4) { /* "View.MemoryView":174 * * * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 176, __pyx_L1_error) /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":179 * * if self.dtype_is_object: * p = <PyObject **> self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = ((PyObject **)__pyx_v_self->data); /* "View.MemoryView":180 * if self.dtype_is_object: * p = <PyObject **> self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 180, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 180, __pyx_L1_error) } __pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize); __pyx_t_9 = __pyx_t_1; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) { __pyx_v_i = __pyx_t_11; /* "View.MemoryView":181 * p = <PyObject **> self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":182 * for i in range(self.len / itemsize): * p[i] = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * @cname('getbuffer') */ Py_INCREF(Py_None); } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ } /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":122 * cdef 
bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_format); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":186 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":188 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ goto __pyx_L3; } /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, 
Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error) __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":190 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ } __pyx_L3:; /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 192, __pyx_L1_error) /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ } /* "View.MemoryView":193 * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":194 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":195 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # <<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":196 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":197 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = __pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":198 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = self.itemsize * info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* "View.MemoryView":199 * info.strides = self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # <<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; 
__pyx_v_info->itemsize = __pyx_t_5; /* "View.MemoryView":200 * info.suboffsets = NULL * info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":203 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ goto __pyx_L5; } /* "View.MemoryView":205 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":207 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* Python wrapper */ static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":213 * def __dealloc__(array self): * if self.callback_free_data != NULL: * self.callback_free_data(self.data) # <<<<<<<<<<<<<< * elif self.free_data: * if self.dtype_is_object: */ __pyx_v_self->callback_free_data(__pyx_v_self->data); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif 
self.free_data: */ goto __pyx_L3; } /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ __pyx_t_1 = (__pyx_v_self->free_data != 0); if (__pyx_t_1) { /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":216 * elif self.free_data: * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< * self._strides, self.ndim, False) * free(self.data) */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ } /* "View.MemoryView":218 * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) * free(self.data) # <<<<<<<<<<<<<< * PyObject_Free(self._shape) * */ free(__pyx_v_self->data); /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ } __pyx_L3:; /* "View.MemoryView":219 * self._strides, self.ndim, False) * free(self.data) * PyObject_Free(self._shape) # <<<<<<<<<<<<<< * * @property */ PyObject_Free(__pyx_v_self->_shape); /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":223 * @property * def memview(self): * return self.get_memview() # <<<<<<<<<<<<<< * * @cname('get_memview') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* function exit code */ 
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_memview", 0); /* "View.MemoryView":227 * @cname('get_memview') * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< * return memoryview(self, flags, self.dtype_is_object) * */ __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); /* "View.MemoryView":228 * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* Python wrapper */ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { 
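/* Editorial note: len() of a cython.view.array reports only the extent of the
 * first dimension (self._shape[0], echoed at "View.MemoryView":230-231), not
 * the total number of elements. */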
Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":231 * * def __len__(self): * return self._shape[0] # <<<<<<<<<<<<<< * * def __getattr__(self, attr): */ __pyx_r = (__pyx_v_self->_shape[0]); goto __pyx_L0; /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* Python wrapper */ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getattr__", 0); /* "View.MemoryView":234 * * def __getattr__(self, attr): * return getattr(self.memview, attr) # <<<<<<<<<<<<<< * * def __getitem__(self, item): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* Python wrapper */ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = 
NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":237 * * def __getitem__(self, item): * return self.memview[item] # <<<<<<<<<<<<<< * * def __setitem__(self, item, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* Python wrapper */ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); /* "View.MemoryView":240 * * def __setitem__(self, item, value): * self.memview[item] = value # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ 
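/* Editorial note: cython.view.array objects are deliberately not picklable;
 * both __reduce_cython__ above and __setstate_cython__ here simply raise
 * TypeError("no default __reduce__ due to non-trivial __cinit__"), the stub
 * Cython emits when a cdef class cannot be rebuilt from a default reduce
 * tuple because its __cinit__ takes required arguments. */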
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { struct __pyx_array_obj *__pyx_v_result = 0; struct __pyx_array_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("array_cwrapper", 0); /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":249 * * if buf == NULL: * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ goto __pyx_L3; } /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ /*else*/ { __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; /* "View.MemoryView":252 * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) # <<<<<<<<<<<<<< * result.data = buf * */ __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error) /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":253 * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) * result.data = buf # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->data = __pyx_v_buf; } __pyx_L3:; /* "View.MemoryView":255 * result.data = buf * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* Python wrapper */ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if 
(unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* "View.MemoryView":282 * cdef object name * def __init__(self, name): * self.name = name # <<<<<<<<<<<<<< * def __repr__(self): * return self.name */ __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); __Pyx_GOTREF(__pyx_v_self->name); __Pyx_DECREF(__pyx_v_self->name); __pyx_v_self->name = __pyx_v_name; /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* function exit code */ __pyx_r = 0; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* Python wrapper */ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":284 * self.name = name * def __repr__(self): * return self.name # <<<<<<<<<<<<<< * * cdef generic = Enum("<strided and direct or indirect>") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->name); __pyx_r = __pyx_v_self->name; goto __pyx_L0; /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = 
__pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_v_state = 0; PyObject *__pyx_v__dict = 0; int __pyx_v_use_setstate; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":5 * cdef object _dict * cdef bint use_setstate * state = (self.name,) # <<<<<<<<<<<<<< * _dict = getattr(self, '__dict__', None) * if _dict is not None: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_self->name); __Pyx_GIVEREF(__pyx_v_self->name); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); __pyx_v_state = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":6 * cdef bint use_setstate * state = (self.name,) * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< * if _dict is not None: * state += (_dict,) */ __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v__dict = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ __pyx_t_2 = (__pyx_v__dict != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "(tree fragment)":8 * _dict = getattr(self, '__dict__', None) * if _dict is not None: * state += (_dict,) # <<<<<<<<<<<<<< * use_setstate = True * else: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v__dict); __Pyx_GIVEREF(__pyx_v__dict); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "(tree fragment)":9 * if _dict is not None: * state += (_dict,) * use_setstate = True # <<<<<<<<<<<<<< * else: * use_setstate = self.name is not None */ __pyx_v_use_setstate = 1; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ goto __pyx_L3; } /* "(tree fragment)":11 * use_setstate = True * else: * use_setstate = self.name is not None # <<<<<<<<<<<<<< * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state */ /*else*/ { __pyx_t_3 = (__pyx_v_self->name != Py_None); __pyx_v_use_setstate = __pyx_t_3; } __pyx_L3:; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ __pyx_t_3 = (__pyx_v_use_setstate != 0); if (__pyx_t_3) { /* "(tree fragment)":13 * use_setstate = self.name is not None * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) */ 
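/* Editorial note: the return value assembled below follows the standard
 * __reduce__ protocol from the echoed source above:
 *     (__pyx_unpickle_Enum, (type(self), 0xb068931, None), state)   when __setstate__ is used
 *     (__pyx_unpickle_Enum, (type(self), 0xb068931, state))         otherwise
 * The __pyx_int_184977713 constant used in the C code is simply 0xb068931 in
 * decimal, the checksum that __pyx_unpickle_Enum is expected to compare
 * against the class layout of this build. */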
__Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ } /* "(tree fragment)":15 * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); __pyx_t_5 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_state); __Pyx_XDECREF(__pyx_v__dict); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ 
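/* Editorial note (illustrative sketch, not part of the Cython-generated
 * output): the __pyx_align_pointer() helper defined a little further below
 * ("View.MemoryView":298) rounds a pointer up to the next `alignment`
 * boundary, and memoryview.__cinit__ uses it to align the atomic acquisition
 * counter.  The same round-up arithmetic as a standalone C program (the
 * names align_up, buf, and main are placeholders, not Cython APIs):
 *
 *     #include <stdint.h>
 *     #include <stdio.h>
 *
 *     static void *align_up(void *p, size_t alignment)
 *     {
 *         uintptr_t a = (uintptr_t) p;
 *         size_t offset = (size_t) (a % alignment);    // distance past the last boundary
 *         if (offset > 0)
 *             a += alignment - offset;                 // bump up to the next boundary
 *         return (void *) a;
 *     }
 *
 *     int main(void)
 *     {
 *         char buf [64];
 *         void *p = align_up(buf + 1, 16);             // 16-byte-aligned pointer into buf
 *         printf("%p -> %p\n", (void *) (buf + 1), p);
 *         return 0;
 *     }
 */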
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":17 * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error) __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { Py_intptr_t __pyx_v_aligned_p; size_t __pyx_v_offset; void *__pyx_r; int __pyx_t_1; /* "View.MemoryView":300 * cdef void *align_pointer(void *memory, size_t alignment) nogil: * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<< * cdef size_t offset * */ __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); /* "View.MemoryView":304 * * with cython.cdivision(True): * offset = aligned_p % alignment # <<<<<<<<<<<<<< * * if offset > 0: */ __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ __pyx_t_1 = ((__pyx_v_offset > 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":307 * * if offset > 0: * aligned_p += alignment - offset # <<<<<<<<<<<<<< * * return <void *> aligned_p */ __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ } /* "View.MemoryView":309 * aligned_p += alignment - offset * * return <void *> aligned_p # <<<<<<<<<<<<<< * * */ __pyx_r = 
((void *)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) if (values[2]) { __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, 
__pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); /* "View.MemoryView":346 * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */ __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj; /* "View.MemoryView":347 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = flags # <<<<<<<<<<<<<< * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */ __pyx_v_self->flags = __pyx_v_flags; /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = (__pyx_v_obj != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":349 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */ __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error) /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":351 * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; /* "View.MemoryView":352 * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * global __pyx_memoryview_thread_locks_used */ Py_INCREF(Py_None); /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ } /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ } /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < 
THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); if (__pyx_t_1) { /* "View.MemoryView":356 * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: */ __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); /* "View.MemoryView":357 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< * if self.lock is NULL: * self.lock = PyThread_allocate_lock() */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":359 * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if self.lock is NULL: * raise MemoryError */ __pyx_v_self->lock = PyThread_allocate_lock(); /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":361 * self.lock = PyThread_allocate_lock() * if self.lock is NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error) /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ } /* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":364 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */ __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L11_bool_binop_done; } __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_self->dtype_is_object = __pyx_t_1; /* "View.MemoryView":363 * raise MemoryError * * if flags & 
PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ goto __pyx_L10; } /* "View.MemoryView":366 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */ /*else*/ { __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L10:; /* "View.MemoryView":368 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */ __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); /* "View.MemoryView":370 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */ __pyx_v_self->typeinfo = NULL; /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyThread_type_lock __pyx_t_6; PyThread_type_lock __pyx_t_7; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":374 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * elif (<__pyx_buffer *> &self.view).obj == Py_None: * */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: */ goto __pyx_L3; } /* "View.MemoryView":375 * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< * * (<__pyx_buffer *> &self.view).obj = NULL */ __pyx_t_2 = ((((Py_buffer 
*)(&__pyx_v_self->view))->obj == Py_None) != 0); if (__pyx_t_2) { /* "View.MemoryView":377 * elif (<__pyx_buffer *> &self.view).obj == Py_None: * * (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<< * Py_DECREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL; /* "View.MemoryView":378 * * (<__pyx_buffer *> &self.view).obj = NULL * Py_DECREF(Py_None) # <<<<<<<<<<<<<< * * cdef int i */ Py_DECREF(Py_None); /* "View.MemoryView":375 * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< * * (<__pyx_buffer *> &self.view).obj = NULL */ } __pyx_L3:; /* "View.MemoryView":382 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":383 * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 */ __pyx_t_3 = __pyx_memoryview_thread_locks_used; __pyx_t_4 = __pyx_t_3; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":384 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); if (__pyx_t_2) { /* "View.MemoryView":385 * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); /* "View.MemoryView":386 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); if (__pyx_t_2) { /* "View.MemoryView":388 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< * break * else: */ __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]); /* "View.MemoryView":387 * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break */ (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6; 
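/* Editorial note: together with the assignment that follows, this completes
 * the swap from the echoed source at "View.MemoryView":387-388; the lock
 * being released moves into slot __pyx_memoryview_thread_locks_used (the
 * first free slot after the decrement above) so a later __cinit__ can reuse
 * it, while the still-in-use lock that occupied that slot moves to index i,
 * keeping the preallocated lock pool compact. */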
(__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7; /* "View.MemoryView":386 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ } /* "View.MemoryView":389 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break # <<<<<<<<<<<<<< * else: * PyThread_free_lock(self.lock) */ goto __pyx_L6_break; /* "View.MemoryView":384 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ } } /*else*/ { /* "View.MemoryView":391 * break * else: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); } __pyx_L6_break:; /* "View.MemoryView":382 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":393 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_item_pointer", 0); /* "View.MemoryView":395 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */ __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); /* "View.MemoryView":397 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ __pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 397, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) 
break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 397, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 = (__pyx_t_1 + 1); /* "View.MemoryView":398 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */ __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 398, __pyx_L1_error) __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 398, __pyx_L1_error) __pyx_v_itemp = __pyx_t_7; /* "View.MemoryView":397 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":400 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_itemp; goto __pyx_L0; /* "View.MemoryView":393 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_idx); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":403 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* Python wrapper */ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject 
*__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":404 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":405 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "View.MemoryView":404 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ } /* "View.MemoryView":407 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */ __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 407, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 407, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = __pyx_t_5; __pyx_t_5 = 0; /* "View.MemoryView":410 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 410, __pyx_L1_error) if (__pyx_t_2) { /* "View.MemoryView":411 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":410 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ } /* "View.MemoryView":413 * return memview_slice(self, indices) * else: * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return self.convert_item_to_object(itemp) * */ /*else*/ { __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 413, __pyx_L1_error) __pyx_v_itemp = __pyx_t_6; /* 
"View.MemoryView":414 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":403 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":416 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* Python wrapper */ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); __Pyx_INCREF(__pyx_v_index); /* "View.MemoryView":417 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ __pyx_t_1 = (__pyx_v_self->view.readonly != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":418 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 418, __pyx_L1_error) /* "View.MemoryView":417 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ } /* "View.MemoryView":420 * raise TypeError("Cannot assign to read-only memoryview") * * have_slices, index = 
_unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */ __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (likely(__pyx_t_2 != Py_None)) { PyObject* sequence = __pyx_t_2; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 420, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 420, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_3; __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":422 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 422, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":423 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_obj = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":424 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 424, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":425 * obj = self.is_slice(value) * if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */ __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":424 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ goto __pyx_L5; } /* "View.MemoryView":427 * self.setitem_slice_assignment(self[index], obj) * else: * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */ /*else*/ { __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 427, __pyx_L1_error) 
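/* Generated dispatch for View.MemoryView.__setitem__ (.pyx lines 416-429):
   slice indices either copy from another buffer-exporting object via
   setitem_slice_assignment, or broadcast a scalar via the
   setitem_slice_assign_scalar call that follows; plain integer indices fall
   through to setitem_indexed.  Roughly, at the Python level (illustrative
   only, not part of this file): mv[1:3] = other copies element-wise,
   mv[1:3] = 0 broadcasts, and mv[2] = 0 writes a single item. */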
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L5:; /* "View.MemoryView":422 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ goto __pyx_L4; } /* "View.MemoryView":429 * self.setitem_slice_assign_scalar(self[index], value) * else: * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 429, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L4:; /* "View.MemoryView":416 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":431 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_slice", 0); __Pyx_INCREF(__pyx_v_obj); /* "View.MemoryView":432 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "View.MemoryView":434 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":435 * try: * obj = memoryview(obj, self.flags & 
~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */ __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 435, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":434 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; /* "View.MemoryView":436 * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 436, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":437 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L7_except_return:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L9_try_end:; } /* "View.MemoryView":432 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ } /* "View.MemoryView":439 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto 
__pyx_L0; /* "View.MemoryView":431 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_obj); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":441 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { __Pyx_memviewslice __pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_src_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; __Pyx_memviewslice *__pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); /* "View.MemoryView":445 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 445, __pyx_L1_error) __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 445, __pyx_L1_error) /* "View.MemoryView":446 * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< * src.ndim, dst.ndim, self.dtype_is_object) * */ if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 446, __pyx_L1_error) __pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 446, __pyx_L1_error) /* "View.MemoryView":447 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":445 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, 
dst.ndim, self.dtype_is_object) */ __pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 445, __pyx_L1_error) /* "View.MemoryView":441 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":449 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { int __pyx_v_array[0x80]; void *__pyx_v_tmp; void *__pyx_v_item; __Pyx_memviewslice *__pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_tmp_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; char const *__pyx_t_6; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); /* "View.MemoryView":451 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): * cdef int array[128] * cdef void *tmp = NULL # <<<<<<<<<<<<<< * cdef void *item * */ __pyx_v_tmp = NULL; /* "View.MemoryView":456 * cdef __Pyx_memviewslice *dst_slice * cdef __Pyx_memviewslice tmp_slice * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< * * if <size_t>self.view.itemsize > sizeof(array): */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 456, __pyx_L1_error) __pyx_v_dst_slice = __pyx_t_1; /* "View.MemoryView":458 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ __pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); if (__pyx_t_2) { /* "View.MemoryView":459 * * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":460 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_2 = ((__pyx_v_tmp == NULL) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":461 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); __PYX_ERR(1, 461, __pyx_L1_error) /* "View.MemoryView":460 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * 
raise MemoryError * item = tmp */ } /* "View.MemoryView":462 * if tmp == NULL: * raise MemoryError * item = tmp # <<<<<<<<<<<<<< * else: * item = <void *> array */ __pyx_v_item = __pyx_v_tmp; /* "View.MemoryView":458 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ goto __pyx_L3; } /* "View.MemoryView":464 * item = tmp * else: * item = <void *> array # <<<<<<<<<<<<<< * * try: */ /*else*/ { __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":466 * item = <void *> array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value */ /*try:*/ { /* "View.MemoryView":467 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ __pyx_t_2 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_2) { /* "View.MemoryView":468 * try: * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< * else: * self.assign_item_from_object(<char *> item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); /* "View.MemoryView":467 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ goto __pyx_L8; } /* "View.MemoryView":470 * (<PyObject **> item)[0] = <PyObject *> value * else: * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< * * */ /*else*/ { __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 470, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L8:; /* "View.MemoryView":474 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":475 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 475, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":474 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ } /* "View.MemoryView":476 * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * finally: */ __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":479 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } __pyx_L6_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_7 = 0; __pyx_t_8 = 0; 
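/* Exception-exit path of the generated try/finally in
   setitem_slice_assign_scalar (View.MemoryView 466-479): the pending
   exception is fetched into temporaries so that PyMem_Free(tmp) still runs,
   after which the exception state is restored and control jumps to
   __pyx_L1_error. */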
__pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __Pyx_XGOTREF(__pyx_t_12); __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_XGIVEREF(__pyx_t_12); __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_XGIVEREF(__pyx_t_9); __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9); __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":449 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":481 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations char *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_indexed", 0); /* "View.MemoryView":482 * * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< * self.assign_item_from_object(itemp, value) * */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 482, __pyx_L1_error) __pyx_v_itemp = __pyx_t_1; /* "View.MemoryView":483 * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":481 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); 
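/* Error path of setitem_indexed (View.MemoryView 481-483): non-slice
   assignment resolves the element address with get_item_pointer and packs
   the value into it via assign_item_from_object; a failure in either call
   is reported here with a traceback entry and propagated to the caller. */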
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":485 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_v_struct = NULL; PyObject *__pyx_v_bytesitem = 0; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; int __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":488 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef bytes bytesitem * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":491 * cdef bytes bytesitem * * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< * try: * result = struct.unpack(self.view.format, bytesitem) */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 491, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":493 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else 
#endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ } /* "View.MemoryView":497 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ /*else:*/ { __pyx_t_10 = strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if (__pyx_t_11) { /* "View.MemoryView":498 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 498, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; /* "View.MemoryView":497 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ } /* "View.MemoryView":499 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "View.MemoryView":494 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 494, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9); __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0; if (__pyx_t_8) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 494, __pyx_L5_except_error) 
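/* except-struct.error handler of convert_item_to_object (View.MemoryView
   492-499): item bytes that struct.unpack cannot decode are reported as
   ValueError("Unable to convert item to object"); on success, a
   single-character format returns the lone unpacked value rather than a
   1-tuple. */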
__Pyx_GOTREF(__pyx_t_9); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_1); /* "View.MemoryView":495 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 495, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 495, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":485 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":501 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject *__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; char *__pyx_t_11; char *__pyx_t_12; char *__pyx_t_13; char *__pyx_t_14; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":504 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":509 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":510 * * if isinstance(value, 
tuple): * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 510, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":509 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ goto __pyx_L3; } /* "View.MemoryView":512 * bytesvalue = struct.pack(self.view.format, *value) * else: * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * for i, c in enumerate(bytesvalue): */ /*else*/ { __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; } 
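/* Slow-path call construction in assign_item_from_object (View.MemoryView
   501-515): the value is serialized with struct.pack(self.view.format, ...)
   and the resulting bytes are copied one char at a time into itemp by the
   loop further below. */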
__Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 512, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "View.MemoryView":514 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = 0; if (unlikely(__pyx_v_bytesvalue == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); __PYX_ERR(1, 514, __pyx_L1_error) } __Pyx_INCREF(__pyx_v_bytesvalue); __pyx_t_10 = __pyx_v_bytesvalue; __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10); __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { __pyx_t_11 = __pyx_t_14; __pyx_v_c = (__pyx_t_11[0]); /* "View.MemoryView":515 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ __pyx_v_i = __pyx_t_9; /* "View.MemoryView":514 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = (__pyx_t_9 + 1); /* "View.MemoryView":515 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; } __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "View.MemoryView":501 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":518 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; char *__pyx_t_5; void *__pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":519 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = (__pyx_v_self->view.readonly != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":520 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 520, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 520, __pyx_L1_error) /* "View.MemoryView":519 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ } /* "View.MemoryView":522 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0); if (__pyx_t_1) { /* "View.MemoryView":523 * * if flags & PyBUF_ND: * info.shape = self.view.shape # <<<<<<<<<<<<<< * else: * info.shape = NULL */ __pyx_t_4 = __pyx_v_self->view.shape; __pyx_v_info->shape = __pyx_t_4; /* "View.MemoryView":522 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ goto __pyx_L6; } /* "View.MemoryView":525 * info.shape = self.view.shape * else: * info.shape = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ /*else*/ { __pyx_v_info->shape = NULL; } __pyx_L6:; /* "View.MemoryView":527 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":528 * * if flags & PyBUF_STRIDES: * info.strides = self.view.strides # <<<<<<<<<<<<<< * else: * info.strides = NULL */ __pyx_t_4 = __pyx_v_self->view.strides; __pyx_v_info->strides = __pyx_t_4; /* "View.MemoryView":527 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ goto __pyx_L7; } /* "View.MemoryView":530 * info.strides = self.view.strides * else: * info.strides = NULL # <<<<<<<<<<<<<< * 
* if flags & PyBUF_INDIRECT: */ /*else*/ { __pyx_v_info->strides = NULL; } __pyx_L7:; /* "View.MemoryView":532 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); if (__pyx_t_1) { /* "View.MemoryView":533 * * if flags & PyBUF_INDIRECT: * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< * else: * info.suboffsets = NULL */ __pyx_t_4 = __pyx_v_self->view.suboffsets; __pyx_v_info->suboffsets = __pyx_t_4; /* "View.MemoryView":532 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ goto __pyx_L8; } /* "View.MemoryView":535 * info.suboffsets = self.view.suboffsets * else: * info.suboffsets = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ /*else*/ { __pyx_v_info->suboffsets = NULL; } __pyx_L8:; /* "View.MemoryView":537 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":538 * * if flags & PyBUF_FORMAT: * info.format = self.view.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_5 = __pyx_v_self->view.format; __pyx_v_info->format = __pyx_t_5; /* "View.MemoryView":537 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ goto __pyx_L9; } /* "View.MemoryView":540 * info.format = self.view.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.buf = self.view.buf */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L9:; /* "View.MemoryView":542 * info.format = NULL * * info.buf = self.view.buf # <<<<<<<<<<<<<< * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize */ __pyx_t_6 = __pyx_v_self->view.buf; __pyx_v_info->buf = __pyx_t_6; /* "View.MemoryView":543 * * info.buf = self.view.buf * info.ndim = self.view.ndim # <<<<<<<<<<<<<< * info.itemsize = self.view.itemsize * info.len = self.view.len */ __pyx_t_7 = __pyx_v_self->view.ndim; __pyx_v_info->ndim = __pyx_t_7; /* "View.MemoryView":544 * info.buf = self.view.buf * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< * info.len = self.view.len * info.readonly = self.view.readonly */ __pyx_t_8 = __pyx_v_self->view.itemsize; __pyx_v_info->itemsize = __pyx_t_8; /* "View.MemoryView":545 * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize * info.len = self.view.len # <<<<<<<<<<<<<< * info.readonly = self.view.readonly * info.obj = self */ __pyx_t_8 = __pyx_v_self->view.len; __pyx_v_info->len = __pyx_t_8; /* "View.MemoryView":546 * info.itemsize = self.view.itemsize * info.len = self.view.len * info.readonly = self.view.readonly # <<<<<<<<<<<<<< * info.obj = self * */ __pyx_t_1 = __pyx_v_self->view.readonly; __pyx_v_info->readonly = __pyx_t_1; /* "View.MemoryView":547 * info.len = self.view.len * info.readonly = self.view.readonly * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":518 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable 
memory view from read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":553 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":554 * @property * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< * transpose_memslice(&result.from_slice) * return result */ __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 554, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 554, __pyx_L1_error) __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":555 * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< * return result * */ __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 555, __pyx_L1_error) /* "View.MemoryView":556 * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) * return result # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":553 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":559 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static 
PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":560 * @property * def base(self): * return self.obj # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->obj); __pyx_r = __pyx_v_self->obj; goto __pyx_L0; /* "View.MemoryView":559 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":563 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_length; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":564 * @property * def shape(self): * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_length = (__pyx_t_2[0]); __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":563 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; 
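/* Note (descriptive comment, not part of the Cython-emitted text): this is the
   shared exit of the generated shape.__get__.  At __pyx_L0 the result slot
   __pyx_r holds either the freshly built shape tuple or NULL when the error
   path above has already recorded a traceback; the RefNanny calls that follow
   simply hand that reference back to the caller. */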
__Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":567 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_stride; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":568 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":570 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 570, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 570, __pyx_L1_error) /* "View.MemoryView":568 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ } /* "View.MemoryView":572 * raise ValueError("Buffer view does not expose strides") * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_v_stride = (__pyx_t_3[0]); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "View.MemoryView":567 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return 
__pyx_r; } /* "View.MemoryView":575 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; Py_ssize_t *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":576 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":577 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__13, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":576 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ } /* "View.MemoryView":579 * return (-1,) * self.view.ndim * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { __pyx_t_4 = __pyx_t_6; __pyx_v_suboffset = (__pyx_t_4[0]); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":575 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, 
__pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":582 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":583 * @property * def ndim(self): * return self.view.ndim # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":582 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":586 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":587 * @property * def itemsize(self): * return self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":586 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":590 * * @property * def 
nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":591 * @property * def nbytes(self): * return self.size * self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":590 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":594 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_v_result = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":595 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ __pyx_t_1 = (__pyx_v_self->_size == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":596 * def 
size(self): * if self._size is None: * result = 1 # <<<<<<<<<<<<<< * * for length in self.view.shape[:self.view.ndim]: */ __Pyx_INCREF(__pyx_int_1); __pyx_v_result = __pyx_int_1; /* "View.MemoryView":598 * result = 1 * * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< * result *= length * */ __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); __pyx_t_6 = 0; /* "View.MemoryView":599 * * for length in self.view.shape[:self.view.ndim]: * result *= length # <<<<<<<<<<<<<< * * self._size = result */ __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 599, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); __pyx_t_6 = 0; } /* "View.MemoryView":601 * result *= length * * self._size = result # <<<<<<<<<<<<<< * * return self._size */ __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); __Pyx_GOTREF(__pyx_v_self->_size); __Pyx_DECREF(__pyx_v_self->_size); __pyx_v_self->_size = __pyx_v_result; /* "View.MemoryView":595 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ } /* "View.MemoryView":603 * self._size = result * * return self._size # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->_size); __pyx_r = __pyx_v_self->_size; goto __pyx_L0; /* "View.MemoryView":594 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":605 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* Python wrapper */ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":606 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":607 * def __len__(self): * if self.view.ndim >= 1: * return self.view.shape[0] # <<<<<<<<<<<<<< * * return 0 */ __pyx_r = (__pyx_v_self->view.shape[0]); goto __pyx_L0; /* "View.MemoryView":606 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ } /* "View.MemoryView":609 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def 
__repr__(self): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":605 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":611 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":612 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":613 * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 613, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); /* "View.MemoryView":612 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":611 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return 
__pyx_r; } /* "View.MemoryView":615 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":616 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":615 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":619 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; 
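/* Note (descriptive comment, not part of the Cython-emitted text): locals for
   the generated is_c_contig body.  __pyx_t_1 receives the __Pyx_memviewslice
   pointer returned by get_slice_from_memview(), and __pyx_t_2 holds the Python
   bool built from slice_is_contig(..., 'C', self.view.ndim). */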
__Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_c_contig", 0); /* "View.MemoryView":622 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'C', self.view.ndim) * */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 622, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":623 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 623, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":619 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":625 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":628 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'F', self.view.ndim) * */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 628, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":629 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); 
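/* Note (descriptive comment, not part of the Cython-emitted text): the new
   bool reference is moved into the return slot and control jumps to the common
   exit; ownership passes from __pyx_t_2 to __pyx_r at this point. */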
__pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":625 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":631 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":633 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":635 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":636 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":641 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":631 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":643 * 
return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy_fortran", 0); /* "View.MemoryView":645 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":647 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); /* "View.MemoryView":648 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 648, __pyx_L1_error) __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":653 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":643 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
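/* Note (descriptive comment, not part of the Cython-emitted text): this is the
   Python wrapper for __reduce_cython__.  It only forwards to the
   implementation below, which unconditionally raises
   TypeError("no default __reduce__ due to non-trivial __cinit__"), i.e. the
   memoryview type has no default pickle support. */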
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to 
non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":657 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":658 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":659 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* "View.MemoryView":660 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":657 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":663 * * 
@cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":664 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":663 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":666 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":671 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":672 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; /* "View.MemoryView":671 * full slices. 
* """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ goto __pyx_L3; } /* "View.MemoryView":674 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ /*else*/ { __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":676 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 676, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":677 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":678 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":679 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 679, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 679, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":680 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":681 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not 
seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":682 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error) __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__16); __Pyx_GIVEREF(__pyx_slice__16); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__16); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":683 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; /* "View.MemoryView":681 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ goto __pyx_L7; } /* "View.MemoryView":685 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ /*else*/ { __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__16); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 685, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":686 * else: * result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; /* "View.MemoryView":680 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ goto __pyx_L6; } /* "View.MemoryView":688 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ /*else*/ { __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":689 * else: * if not isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_Raise(__pyx_t_11, 0, 0, 0); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __PYX_ERR(1, 689, __pyx_L1_error) /* "View.MemoryView":688 * have_slices = True * else: * if not 
isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ } /* "View.MemoryView":691 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if (!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":692 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error) } __pyx_L6:; /* "View.MemoryView":679 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":694 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 694, __pyx_L1_error) __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":695 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":696 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 696, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__16); __Pyx_GIVEREF(__pyx_slice__16); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__16); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 696, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":695 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ } /* "View.MemoryView":698 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject*)__pyx_t_11); __pyx_t_11 = 0; goto __pyx_L0; /* "View.MemoryView":666 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_tup); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XDECREF(__pyx_v_item); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":700 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); /* "View.MemoryView":701 * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") */ __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_suboffset = (__pyx_t_1[0]); /* "View.MemoryView":702 * cdef 
assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":703 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 703, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 703, __pyx_L1_error) /* "View.MemoryView":702 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ } } /* "View.MemoryView":700 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":710 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { int __pyx_v_new_ndim; int __pyx_v_suboffset_dim; int __pyx_v_dim; __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; __Pyx_memviewslice *__pyx_v_p_src; struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; __Pyx_memviewslice *__pyx_v_p_dst; int *__pyx_v_p_suboffset_dim; Py_ssize_t __pyx_v_start; Py_ssize_t __pyx_v_stop; Py_ssize_t __pyx_v_step; int __pyx_v_have_start; int __pyx_v_have_stop; int __pyx_v_have_step; PyObject *__pyx_v_index = NULL; struct __pyx_memoryview_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int __pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memview_slice", 0); /* "View.MemoryView":711 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ __pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":718 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); /* "View.MemoryView":722 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if 
(unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(1, 722, __pyx_L1_error) } } #endif /* "View.MemoryView":724 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":725 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 725, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":726 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); /* "View.MemoryView":724 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ goto __pyx_L3; } /* "View.MemoryView":728 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":729 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":735 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":736 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":741 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = &suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":742 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":746 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 746, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) 
#else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } else { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 746, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":747 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":751 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * False) */ __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 751, __pyx_L1_error) /* "View.MemoryView":748 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 748, __pyx_L1_error) /* "View.MemoryView":747 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ goto __pyx_L6; } /* "View.MemoryView":754 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":755 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /* "View.MemoryView":756 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":757 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * new_ndim += 1 * else: */ (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; /* "View.MemoryView":758 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * start = index.start or 0 */ __pyx_v_new_ndim = 
(__pyx_v_new_ndim + 1); /* "View.MemoryView":754 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ goto __pyx_L6; } /* "View.MemoryView":760 * new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ /*else*/ { __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 760, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 760, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L7_bool_binop_done; } __pyx_t_10 = 0; __pyx_L7_bool_binop_done:; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":761 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 761, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 761, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = 0; __pyx_L9_bool_binop_done:; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":762 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 762, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = 0; __pyx_L11_bool_binop_done:; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":764 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 764, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":765 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 765, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":766 * have_start = index.start is not None * 
have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 766, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":768 * have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 768, __pyx_L1_error) /* "View.MemoryView":774 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; /* "View.MemoryView":746 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":776 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":777 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":778 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 778, __pyx_L1_error) } /* "View.MemoryView":779 * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 779, __pyx_L1_error) } /* "View.MemoryView":777 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":776 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ } /* 
"View.MemoryView":782 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ /*else*/ { __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":783 * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":782 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 782, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":710 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":807 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":827 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":829 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":830 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":829 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ } /* "View.MemoryView":831 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":832 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: 
* */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 832, __pyx_L1_error) /* "View.MemoryView":831 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ } /* "View.MemoryView":827 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ goto __pyx_L3; } /* "View.MemoryView":835 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ /*else*/ { __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":837 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":838 * * if have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 838, __pyx_L1_error) /* "View.MemoryView":837 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ } /* "View.MemoryView":841 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":842 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":843 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":844 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":845 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; /* "View.MemoryView":844 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ } /* "View.MemoryView":842 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ goto __pyx_L12; } /* "View.MemoryView":846 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":847 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":848 * elif start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":847 * start = 0 * elif start >= shape: 
* if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L14; } /* "View.MemoryView":850 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ /*else*/ { __pyx_v_start = __pyx_v_shape; } __pyx_L14:; /* "View.MemoryView":846 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ } __pyx_L12:; /* "View.MemoryView":841 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ goto __pyx_L11; } /* "View.MemoryView":852 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":853 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":852 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L15; } /* "View.MemoryView":855 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ /*else*/ { __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":857 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":858 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":859 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":860 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":861 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; /* "View.MemoryView":860 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ } /* "View.MemoryView":858 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ goto __pyx_L17; } /* "View.MemoryView":862 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":863 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; /* "View.MemoryView":862 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ } __pyx_L17:; /* "View.MemoryView":857 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ goto __pyx_L16; } /* "View.MemoryView":865 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":866 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1L; /* "View.MemoryView":865 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ goto __pyx_L19; } /* "View.MemoryView":868 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ /*else*/ { __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":870 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); 
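/* The block below (generated from the Cython source at View.MemoryView
   lines 870-886) defaults step to 1 when the slice carries none, then
   derives the extent of the sliced dimension with truncating C division
   (cython.cdivision(True)):

       new_shape = (stop - start) / step;          // truncating division
       if ((stop - start) - step * new_shape)      // partial final step
           new_shape += 1;
       if (new_shape < 0) new_shape = 0;           // empty slice

   A quick check of the arithmetic: start=0, stop=5, step=2 gives 5/2 = 2,
   the nonzero remainder bumps it to 3, which matches len(range(0, 5, 2)).
   The destination stride for the new dimension is the source stride
   multiplied by step, as set a few statements further on. */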
if (__pyx_t_2) { /* "View.MemoryView":871 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; /* "View.MemoryView":870 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ } /* "View.MemoryView":875 * * with cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":877 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":878 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); /* "View.MemoryView":877 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ } /* "View.MemoryView":880 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":881 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; /* "View.MemoryView":880 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ } /* "View.MemoryView":884 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":885 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":886 * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":889 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":890 * * if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); /* "View.MemoryView":889 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ goto __pyx_L23; } /* "View.MemoryView":892 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ /*else*/ { __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":894 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":895 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":896 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] 
+ suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":897 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); /* "View.MemoryView":896 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ goto __pyx_L26; } /* "View.MemoryView":899 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ /*else*/ { /* "View.MemoryView":900 * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< * else: * suboffset_dim[0] = new_ndim */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 899, __pyx_L1_error) } __pyx_L26:; /* "View.MemoryView":895 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ goto __pyx_L25; } /* "View.MemoryView":902 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ /*else*/ { (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; } __pyx_L25:; /* "View.MemoryView":894 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ } /* "View.MemoryView":904 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":807 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":910 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("pybuffer_index", 0); /* "View.MemoryView":912 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1L; /* "View.MemoryView":913 * Py_ssize_t dim) except 
NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":916 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":917 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 917, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 917, __pyx_L1_error) } __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize); /* "View.MemoryView":918 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: * shape = view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; /* "View.MemoryView":916 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ goto __pyx_L3; } /* "View.MemoryView":920 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ /*else*/ { __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":921 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":922 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":923 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); /* "View.MemoryView":922 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ } } __pyx_L3:; /* "View.MemoryView":925 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":926 * * if index < 0: * index += view.shape[dim] # <<<<<<<<<<<<<< * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":927 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":928 * index += view.shape[dim] * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = 
__Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 928, __pyx_L1_error) /* "View.MemoryView":927 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":925 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ } /* "View.MemoryView":930 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":931 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * resultp = bufp + index * stride */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 931, __pyx_L1_error) /* "View.MemoryView":930 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":933 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":934 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":935 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); /* "View.MemoryView":934 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ } /* "View.MemoryView":937 * resultp = (<char **> resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":910 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); 
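/* Error exit for pybuffer_index: the temporaries used to build the
   IndexError message are released above, a traceback frame naming
   View.MemoryView.pybuffer_index is recorded below, and NULL is returned,
   matching the function's `except NULL` declaration. */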
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":943 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; Py_ssize_t *__pyx_t_2; long __pyx_t_3; long __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":944 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":946 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":947 * * cdef Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":951 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] */ __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2); __pyx_t_4 = __pyx_t_3; for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":952 * cdef int i, j * for i in range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":953 * for i in range(ndim / 2): * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; /* "View.MemoryView":954 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":956 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_8) { } else { __pyx_t_7 = __pyx_t_8; goto __pyx_L6_bool_binop_done; } __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_7 = __pyx_t_8; __pyx_L6_bool_binop_done:; if (__pyx_t_7) { /* "View.MemoryView":957 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with 
indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L1_error) /* "View.MemoryView":956 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ } } /* "View.MemoryView":959 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto __pyx_L0; /* "View.MemoryView":943 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":976 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* Python wrapper */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":977 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":976 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":979 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":980 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":981 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) 
*/ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 981, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":980 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ } /* "View.MemoryView":983 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 983, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":979 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":985 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":986 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":987 * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, itemp, value) */ __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 987, __pyx_L1_error) /* "View.MemoryView":986 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ goto __pyx_L3; } /* "View.MemoryView":989 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< * * @property */ /*else*/ { __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 989, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "View.MemoryView":985 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * 
self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":992 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":993 * @property * def base(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":992 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ 
due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":999 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int 
__pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":1007 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":1008 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "View.MemoryView":1007 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ } /* "View.MemoryView":1013 * * * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1015 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":1016 * * result.from_slice = memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":1018 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); __Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":1019 * * result.from_object = (<memoryview> memviewslice.memview).base * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":1021 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":1022 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* 
"View.MemoryView":1023 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":1024 * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":1025 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: */ Py_INCREF(Py_None); /* "View.MemoryView":1027 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); if (__pyx_t_1) { /* "View.MemoryView":1028 * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< * else: * result.flags = PyBUF_RECORDS_RO */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":1027 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ goto __pyx_L4; } /* "View.MemoryView":1030 * result.flags = PyBUF_RECORDS * else: * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ /*else*/ { __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; } __pyx_L4:; /* "View.MemoryView":1032 * result.flags = PyBUF_RECORDS_RO * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":1033 * * result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* "View.MemoryView":1036 * * * result.view.suboffsets = NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":1037 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":1038 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1039 * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t 
*)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":1040 * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L6_break; /* "View.MemoryView":1038 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ } } __pyx_L6_break:; /* "View.MemoryView":1042 * break * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* "View.MemoryView":1043 * * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1043, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1044 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; } /* "View.MemoryView":1046 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1047 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1049 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":999 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1052 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * 
__Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj */ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); /* "View.MemoryView":1055 * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1056 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # <<<<<<<<<<<<<< * return &obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1056, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1057 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; /* "View.MemoryView":1055 * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ } /* "View.MemoryView":1059 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1060 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1052 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; Py_ssize_t __pyx_t_5; __Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1067 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = __pyx_v_memview->view.shape; 
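/* Descriptive note on slice_copy (grounded in the surrounding generated code):
   the statements below flatten the memoryview's Py_buffer into the
   __Pyx_memviewslice "dst". The shape/strides/suboffsets pointers and the
   base data pointer are read from memview.view, then copied element-wise for
   each dimension; when the buffer exposes no suboffsets, -1 is stored so the
   later copy routines can treat every slice uniformly. */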
__pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1068 * * shape = memview.view.shape * strides = memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1069 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1071 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1072 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); /* "View.MemoryView":1074 * dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_dim = __pyx_t_4; /* "View.MemoryView":1075 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1076 * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1077 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_5 = -1L; } (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1080 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy", 0); /* "View.MemoryView":1083 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1084 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, 
&memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1080 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1087 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); /* "View.MemoryView":1094 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1095 * * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1096 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; /* "View.MemoryView":1094 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ goto __pyx_L3; } /* "View.MemoryView":1098 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ /*else*/ { __pyx_v_to_object_func = NULL; /* "View.MemoryView":1099 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1101 * to_dtype_func = NULL * * return 
memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1103 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":1087 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1109 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; int __pyx_t_1; /* "View.MemoryView":1110 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1111 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; /* "View.MemoryView":1110 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ } /* "View.MemoryView":1113 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ /*else*/ { __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1109 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1116 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_c_stride; Py_ssize_t __pyx_v_f_stride; char __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1121 * """ * cdef int i * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t f_stride = 0 * */ __pyx_v_c_stride = 0; /* "View.MemoryView":1122 * cdef int i * cdef Py_ssize_t c_stride = 0 * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_f_stride = 0; /* "View.MemoryView":1124 * cdef Py_ssize_t f_stride = 0 * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1125 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1126 * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1127 * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * for i in range(ndim): */ goto __pyx_L4_break; /* "View.MemoryView":1125 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ } } __pyx_L4_break:; /* "View.MemoryView":1129 * break * * for i in range(ndim): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] */ __pyx_t_1 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_1; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1130 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1131 * for i in range(ndim): * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1132 * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): */ goto __pyx_L7_break; /* "View.MemoryView":1130 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ } } __pyx_L7_break:; /* "View.MemoryView":1134 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1135 * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): * return 'C' # <<<<<<<<<<<<<< * else: * return 'F' */ __pyx_r = 'C'; goto __pyx_L0; /* "View.MemoryView":1134 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ } /* "View.MemoryView":1137 * return 'C' * else: * return 'F' # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ /*else*/ { __pyx_r = 'F'; goto __pyx_L0; } /* "View.MemoryView":1116 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given 
slice. */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1140 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; Py_ssize_t __pyx_v_dst_extent; Py_ssize_t __pyx_v_src_stride; Py_ssize_t __pyx_v_dst_stride; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; /* "View.MemoryView":1147 * * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] */ __pyx_v_src_extent = (__pyx_v_src_shape[0]); /* "View.MemoryView":1148 * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] */ __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); /* "View.MemoryView":1149 * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_stride = dst_strides[0] * */ __pyx_v_src_stride = (__pyx_v_src_strides[0]); /* "View.MemoryView":1150 * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); /* "View.MemoryView":1152 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } /* "View.MemoryView":1154 * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize * dst_extent) * else: */ __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); if (__pyx_t_2) { __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); } __pyx_t_3 = (__pyx_t_2 != 0); __pyx_t_1 = __pyx_t_3; __pyx_L5_bool_binop_done:; /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ if (__pyx_t_1) { /* "View.MemoryView":1155 * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< * else: * for i 
in range(dst_extent): */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent))); /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ goto __pyx_L4; } /* "View.MemoryView":1157 * memcpy(dst_data, src_data, itemsize * dst_extent) * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize) * src_data += src_stride */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1158 * else: * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< * src_data += src_stride * dst_data += dst_stride */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize)); /* "View.MemoryView":1159 * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * else: */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1160 * memcpy(dst_data, src_data, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L4:; /* "View.MemoryView":1152 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ goto __pyx_L3; } /* "View.MemoryView":1162 * dst_data += dst_stride * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * _copy_strided_to_strided(src_data, src_strides + 1, * dst_data, dst_strides + 1, */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1163 * else: * for i in range(dst_extent): * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< * dst_data, dst_strides + 1, * src_shape + 1, dst_shape + 1, */ _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); /* "View.MemoryView":1167 * src_shape + 1, dst_shape + 1, * ndim - 1, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1168 * ndim - 1, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L3:; /* "View.MemoryView":1140 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ /* function exit code */ } /* "View.MemoryView":1170 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { /* "View.MemoryView":1173 * __Pyx_memviewslice *dst, * int ndim, size_t 
itemsize) nogil: * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< * src.shape, dst.shape, ndim, itemsize) * */ _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1170 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ /* function exit code */ } /* "View.MemoryView":1177 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize */ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_size; Py_ssize_t __pyx_r; Py_ssize_t __pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; /* "View.MemoryView":1179 * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<< * * for shape in src.shape[:ndim]: */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_size = __pyx_t_1; /* "View.MemoryView":1181 * cdef Py_ssize_t shape, size = src.memview.view.itemsize * * for shape in src.shape[:ndim]: # <<<<<<<<<<<<<< * size *= shape * */ __pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim); for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_shape = (__pyx_t_2[0]); /* "View.MemoryView":1182 * * for shape in src.shape[:ndim]: * size *= shape # <<<<<<<<<<<<<< * * return size */ __pyx_v_size = (__pyx_v_size * __pyx_v_shape); } /* "View.MemoryView":1184 * size *= shape * * return size # <<<<<<<<<<<<<< * * @cname('__pyx_fill_contig_strides_array') */ __pyx_r = __pyx_v_size; goto __pyx_L0; /* "View.MemoryView":1177 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1187 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { int __pyx_v_idx; Py_ssize_t __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1196 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ __pyx_t_1 = ((__pyx_v_order == 'F') != 0); if (__pyx_t_1) { /* "View.MemoryView":1197 * * if order == 'F': * for idx in range(ndim): # <<<<<<<<<<<<<< * strides[idx] = stride * stride *= shape[idx] */ __pyx_t_2 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_idx = __pyx_t_4; /* "View.MemoryView":1198 * if order == 'F': * for idx in range(ndim): * strides[idx] = 
stride # <<<<<<<<<<<<<< * stride *= shape[idx] * else: */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1199 * for idx in range(ndim): * strides[idx] = stride * stride *= shape[idx] # <<<<<<<<<<<<<< * else: * for idx in range(ndim - 1, -1, -1): */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } /* "View.MemoryView":1196 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ goto __pyx_L3; } /* "View.MemoryView":1201 * stride *= shape[idx] * else: * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * strides[idx] = stride * stride *= shape[idx] */ /*else*/ { for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { __pyx_v_idx = __pyx_t_2; /* "View.MemoryView":1202 * else: * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride # <<<<<<<<<<<<<< * stride *= shape[idx] * */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1203 * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride * stride *= shape[idx] # <<<<<<<<<<<<<< * * return stride */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } } __pyx_L3:; /* "View.MemoryView":1205 * stride *= shape[idx] * * return stride # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_data_to_temp') */ __pyx_r = __pyx_v_stride; goto __pyx_L0; /* "View.MemoryView":1187 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1208 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { int __pyx_v_i; void *__pyx_v_result; size_t __pyx_v_itemsize; size_t __pyx_v_size; void *__pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; struct __pyx_memoryview_obj *__pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":1219 * cdef void *result * * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef size_t size = slice_get_size(src, ndim) * */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1220 * * cdef size_t itemsize = src.memview.view.itemsize * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< * * result = malloc(size) */ __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); /* "View.MemoryView":1222 * cdef size_t size = slice_get_size(src, ndim) * * result = malloc(size) # <<<<<<<<<<<<<< * if not result: * _err(MemoryError, NULL) */ __pyx_v_result = malloc(__pyx_v_size); /* "View.MemoryView":1223 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1224 * result = malloc(size) * if not result: * _err(MemoryError, NULL) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1224, __pyx_L1_error) /* "View.MemoryView":1223 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ } /* 
"View.MemoryView":1227 * * * tmpslice.data = <char *> result # <<<<<<<<<<<<<< * tmpslice.memview = src.memview * for i in range(ndim): */ __pyx_v_tmpslice->data = ((char *)__pyx_v_result); /* "View.MemoryView":1228 * * tmpslice.data = <char *> result * tmpslice.memview = src.memview # <<<<<<<<<<<<<< * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] */ __pyx_t_4 = __pyx_v_src->memview; __pyx_v_tmpslice->memview = __pyx_t_4; /* "View.MemoryView":1229 * tmpslice.data = <char *> result * tmpslice.memview = src.memview * for i in range(ndim): # <<<<<<<<<<<<<< * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1230 * tmpslice.memview = src.memview * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< * tmpslice.suboffsets[i] = -1 * */ (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); /* "View.MemoryView":1231 * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, */ (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1233 * tmpslice.suboffsets[i] = -1 * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< * ndim, order) * */ (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order)); /* "View.MemoryView":1237 * * * for i in range(ndim): # <<<<<<<<<<<<<< * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1238 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1239 * for i in range(ndim): * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< * * if slice_is_contig(src[0], order, ndim): */ (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; /* "View.MemoryView":1238 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ } } /* "View.MemoryView":1241 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1242 * * if slice_is_contig(src[0], order, ndim): * memcpy(result, src.data, size) # <<<<<<<<<<<<<< * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) */ (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size)); /* "View.MemoryView":1241 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ goto __pyx_L9; } /* "View.MemoryView":1244 * memcpy(result, src.data, size) * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< * * return result */ /*else*/ { copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); } __pyx_L9:; /* "View.MemoryView":1246 * copy_strided_to_strided(src, tmpslice, ndim, itemsize) * * return result # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_result; 
goto __pyx_L0; /* "View.MemoryView":1208 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = NULL; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1251 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_extents", 0); /* "View.MemoryView":1254 * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % * (i, extent1, extent2)) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err_dim') */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; /* "View.MemoryView":1253 * cdef int _err_extents(int i, Py_ssize_t extent1, * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< * (i, extent1, extent2)) * */ __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 1253, __pyx_L1_error) /* "View.MemoryView":1251 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, 
__pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1257 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_dim", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1258 * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err') */ __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_INCREF(__pyx_v_error); __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? 
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 1258, __pyx_L1_error) /* "View.MemoryView":1257 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1261 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1262 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":1263 * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< * else: * raise error */ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_error); __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_2 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 1263, __pyx_L1_error) /* "View.MemoryView":1262 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ } /* "View.MemoryView":1265 * raise error(msg.decode('ascii')) * else: * raise error # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_contents') */ /*else*/ { __Pyx_Raise(__pyx_v_error, 0, 0, 0); __PYX_ERR(1, 1265, __pyx_L1_error) } /* "View.MemoryView":1261 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1268 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { void *__pyx_v_tmpdata; size_t __pyx_v_itemsize; int __pyx_v_i; char __pyx_v_order; int __pyx_v_broadcasting; int __pyx_v_direct_copy; __Pyx_memviewslice __pyx_v_tmp; int __pyx_v_ndim; int __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; void *__pyx_t_7; int __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":1276 * Check for overlapping memory and verify the shapes. 
* """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1277 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1279 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1280 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1281 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1284 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1285 * * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); /* "View.MemoryView":1284 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ goto __pyx_L3; } /* "View.MemoryView":1286 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1287 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); /* "View.MemoryView":1286 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ } __pyx_L3:; /* "View.MemoryView":1289 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1291 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_5; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1292 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) 
!= 0); if (__pyx_t_2) { /* "View.MemoryView":1293 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1294 * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # <<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ __pyx_v_broadcasting = 1; /* "View.MemoryView":1295 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; /* "View.MemoryView":1293 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ goto __pyx_L7; } /* "View.MemoryView":1297 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if src.suboffsets[i] >= 0: */ /*else*/ { __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":1292 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ } /* "View.MemoryView":1299 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1300 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1300, __pyx_L1_error) /* "View.MemoryView":1299 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ } } /* "View.MemoryView":1302 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1304 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1305 * * if not slice_is_contig(src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); /* "View.MemoryView":1304 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ } /* "View.MemoryView":1307 * order = get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< * src = tmp * */ __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) 
__PYX_ERR(1, 1307, __pyx_L1_error) __pyx_v_tmpdata = __pyx_t_7; /* "View.MemoryView":1308 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp # <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; /* "View.MemoryView":1302 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ } /* "View.MemoryView":1310 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1313 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1314 * * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); /* "View.MemoryView":1313 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ goto __pyx_L12; } /* "View.MemoryView":1315 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1316 * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); /* "View.MemoryView":1315 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ } __pyx_L12:; /* "View.MemoryView":1318 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1320 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1321 * * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); /* "View.MemoryView":1322 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1323 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, 
dtype_is_object, ndim, True) * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1324 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1318 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ } /* "View.MemoryView":1310 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1326 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_8 = (__pyx_t_2 != 0); if (__pyx_t_8) { /* "View.MemoryView":1329 * * * transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1329, __pyx_L1_error) /* "View.MemoryView":1330 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1330, __pyx_L1_error) /* "View.MemoryView":1326 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1332 * transpose_memslice(&dst) * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1333 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1334 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1336 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1337 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1268 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1340 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ static void 
__pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { int __pyx_v_i; int __pyx_v_offset; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1344 * int ndim_other) nogil: * cdef int i * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); /* "View.MemoryView":1346 * cdef int offset = ndim_other - ndim * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1347 * * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] */ (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); /* "View.MemoryView":1348 * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * */ (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1349 * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< * * for i in range(offset): */ (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); } /* "View.MemoryView":1351 * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] */ __pyx_t_1 = __pyx_v_offset; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1352 * * for i in range(offset): * mslice.shape[i] = 1 # <<<<<<<<<<<<<< * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 */ (__pyx_v_mslice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1353 * for i in range(offset): * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< * mslice.suboffsets[i] = -1 * */ (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); /* "View.MemoryView":1354 * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1340 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ } /* "View.MemoryView":1362 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { int __pyx_t_1; /* "View.MemoryView":1366 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1367 * * if dtype_is_object: * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # 
<<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1366 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ } /* "View.MemoryView":1362 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ } /* "View.MemoryView":1371 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_RefNannyDeclarations #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); /* "View.MemoryView":1374 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1371 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } /* "View.MemoryView":1377 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); /* "View.MemoryView":1381 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1382 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ __pyx_t_4 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_4) { /* "View.MemoryView":1383 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ __pyx_t_4 = (__pyx_v_inc != 0); if (__pyx_t_4) { /* "View.MemoryView":1384 * if ndim == 1: * if inc: * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF((<PyObject **> data)[0]) */ Py_INCREF((((PyObject **)__pyx_v_data)[0])); /* "View.MemoryView":1383 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ goto __pyx_L6; } /* "View.MemoryView":1386 * 
Py_INCREF((<PyObject **> data)[0]) * else: * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ /*else*/ { Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; /* "View.MemoryView":1382 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ goto __pyx_L5; } /* "View.MemoryView":1388 * Py_DECREF((<PyObject **> data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ /*else*/ { /* "View.MemoryView":1389 * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, * ndim - 1, inc) # <<<<<<<<<<<<<< * * data += strides[0] */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } __pyx_L5:; /* "View.MemoryView":1391 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1377 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1397 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { /* "View.MemoryView":1400 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1401 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1403 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1397 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ } /* "View.MemoryView":1407 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_extent; int __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t 
__pyx_t_3; Py_ssize_t __pyx_t_4; /* "View.MemoryView":1411 * size_t itemsize, void *item) nogil: * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t extent = shape[0] * */ __pyx_v_stride = (__pyx_v_strides[0]); /* "View.MemoryView":1412 * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_extent = (__pyx_v_shape[0]); /* "View.MemoryView":1414 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1415 * * if ndim == 1: * for i in range(extent): # <<<<<<<<<<<<<< * memcpy(data, item, itemsize) * data += stride */ __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1416 * if ndim == 1: * for i in range(extent): * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< * data += stride * else: */ (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize)); /* "View.MemoryView":1417 * for i in range(extent): * memcpy(data, item, itemsize) * data += stride # <<<<<<<<<<<<<< * else: * for i in range(extent): */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } /* "View.MemoryView":1414 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ goto __pyx_L3; } /* "View.MemoryView":1419 * data += stride * else: * for i in range(extent): # <<<<<<<<<<<<<< * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) */ /*else*/ { __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1420 * else: * for i in range(extent): * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, itemsize, item) * data += stride */ __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1422 * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) * data += stride # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } } __pyx_L3:; /* "View.MemoryView":1407 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /* function exit code */ } /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v___pyx_type = 0; long __pyx_v___pyx_checksum; PyObject *__pyx_v___pyx_state = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; 
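/* Editorial note: the wrapper below is the generated argument-unpacking shim for
 * __pyx_unpickle_Enum(__pyx_type, __pyx_checksum, __pyx_state). It accepts the three
 * arguments positionally or by keyword, converts __pyx_checksum to a C long via
 * __Pyx_PyInt_As_long, and then delegates to the implementation function
 * __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum. */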
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v___pyx_type = values[0]; __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) __pyx_v___pyx_state = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_v___pyx_PickleError = 0; PyObject *__pyx_v___pyx_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0); if (__pyx_t_1) { /* "(tree fragment)":5 * cdef object __pyx_result * if 
__pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_PickleError); __Pyx_GIVEREF(__pyx_n_s_PickleError); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_2); __pyx_v___pyx_PickleError = __pyx_t_2; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":6 * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: */ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_INCREF(__pyx_v___pyx_PickleError); __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 6, __pyx_L1_error) /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ } /* "(tree fragment)":7 * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v___pyx_result = __pyx_t_3; __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ __pyx_t_1 = (__pyx_v___pyx_state != Py_None); __pyx_t_6 = (__pyx_t_1 != 0); if (__pyx_t_6) { /* "(tree fragment)":9 * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<< * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error) __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ } /* "(tree fragment)":10 * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result # <<<<<<<<<<<<<< * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v___pyx_result); __pyx_r = __pyx_v___pyx_result; goto __pyx_L0; /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v___pyx_PickleError); __Pyx_XDECREF(__pyx_v___pyx_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 
0; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); /* "(tree fragment)":12 * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 12, __pyx_L1_error) } __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v___pyx_result->name); __Pyx_DECREF(__pyx_v___pyx_result->name); __pyx_v___pyx_result->name = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 13, __pyx_L1_error) } __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error) __pyx_t_4 = ((__pyx_t_3 > 1) != 0); if (__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L4_bool_binop_done; } __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error) __pyx_t_5 = (__pyx_t_4 != 0); __pyx_t_2 = __pyx_t_5; __pyx_L4_bool_binop_done:; if (__pyx_t_2) { /* "(tree fragment)":14 * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 14, __pyx_L1_error) } __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } __pyx_t_1 = (__pyx_t_8) ? 
__Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static struct __pyx_vtabstruct_array __pyx_vtable_array; static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_array_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_array_obj *)o); p->__pyx_vtab = __pyx_vtabptr_array; p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_array(PyObject *o) { struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); __pyx_array___dealloc__(o); __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->mode); Py_CLEAR(p->_format); (*Py_TYPE(o)->tp_free)(o); } static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_array___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n); if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); v = __pyx_array___getattr__(o, n); } return v; } static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); } static PyMethodDef __pyx_methods_array[] = { 
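/* Method table for the generated `array` type: __getattr__ (METH_O|METH_COEXIST, used as
   a fallback by the tp_getattro slot above), plus the pickling helpers
   __reduce_cython__ and __setstate_cython__. */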
{"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { __pyx_array___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { __pyx_array___len__, /*mp_length*/ __pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_array_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) "monotonic_align.core.array", /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && 
!_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyMethodDef __pyx_methods_Enum[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) "monotonic_align.core.Enum", /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_Enum, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif 
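/* Dealloc path for the generated memoryview type: after the optional tp_finalize hook,
   the object is untracked from the GC, the refcount is temporarily bumped and any pending
   exception is saved/restored around the __dealloc__ call, and the owned references
   (obj, _size, _array_interface) are cleared before tp_free releases the memory. */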
PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); __pyx_memoryview___dealloc__(o); __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); } static PyMethodDef __pyx_methods_memoryview[] = { {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, {"__reduce_cython__", 
(PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ __pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_memoryview_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) "monotonic_align.core.memoryview", /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ 
#endif }; static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); __pyx_memoryviewslice___dealloc__(o); __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { PyObject* tmp; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; __pyx_tp_clear_memoryview(o); tmp = ((PyObject*)p->from_object); p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); __PYX_XDEC_MEMVIEW(&p->from_slice, 1); return 0; } static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); } static PyMethodDef __pyx_methods__memoryviewslice[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_memoryviewslice = { PyVarObject_HEAD_INIT(0, 0) "monotonic_align.core._memoryviewslice", /*tp_name*/ sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___repr__, /*tp_repr*/ #else 0, /*tp_repr*/ #endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ "Internal class for passing memoryview slices to Python", /*tp_doc*/ 
__pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ __pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyMethodDef __pyx_methods[] = { {"maximum_path_c", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15monotonic_align_4core_1maximum_path_c, METH_VARARGS|METH_KEYWORDS, 0}, {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_core(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_core}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "core", 0, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define CYTHON_SMALL_CODE __attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, 
__pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, 
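/* Each entry in this generated string table pairs a slot pointer with its C
   literal and byte length, followed by encoding/unicode/str/intern flags (per
   Cython's __Pyx_StringTabEntry layout); __Pyx_InitStrings(), called from
   __Pyx_InitGlobals() further below, fills the slots with the corresponding
   Python string objects at import time. */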
{&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_paths, __pyx_k_paths, sizeof(__pyx_k_paths), 0, 0, 1, 1}, {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_t_xs, __pyx_k_t_xs, sizeof(__pyx_k_t_xs), 0, 0, 1, 1}, {&__pyx_n_s_t_ys, __pyx_k_t_ys, sizeof(__pyx_k_t_ys), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, {&__pyx_n_s_values, __pyx_k_values, sizeof(__pyx_k_values), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = 
__Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 15, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error) __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error) __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error) __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 404, __pyx_L1_error) __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 613, __pyx_L1_error) __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 832, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); 
__Pyx_GIVEREF(__pyx_tuple__7); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "View.MemoryView":418 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "View.MemoryView":495 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 495, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "View.MemoryView":520 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 520, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* "View.MemoryView":570 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 570, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_GIVEREF(__pyx_tuple__12); /* "View.MemoryView":577 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __pyx_tuple__13 = PyTuple_New(1); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_tuple__13, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_tuple__13); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__14); __Pyx_GIVEREF(__pyx_tuple__14); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if 
(unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__15); __Pyx_GIVEREF(__pyx_tuple__15); /* "View.MemoryView":682 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_slice__16 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__16)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__16); __Pyx_GIVEREF(__pyx_slice__16); /* "View.MemoryView":703 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 703, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__18); __Pyx_GIVEREF(__pyx_tuple__18); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__20); __Pyx_GIVEREF(__pyx_tuple__20); /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__22); __Pyx_GIVEREF(__pyx_tuple__22); /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ 
__pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__24); __Pyx_GIVEREF(__pyx_tuple__24); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_tuple__25 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); __pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ static int __Pyx_modinit_global_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); /*--- Global init code ---*/ generic = Py_None; Py_INCREF(Py_None); strided = Py_None; Py_INCREF(Py_None); indirect = Py_None; Py_INCREF(Py_None); contiguous = Py_None; Py_INCREF(Py_None); indirect_contiguous = Py_None; Py_INCREF(Py_None); __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); /*--- Variable export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); /*--- Function export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_type_init_code(void) { __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); /*--- Type init code ---*/ __pyx_vtabptr_array = &__pyx_vtable_array; 
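/* Type initialisation pattern used for each extension type below: fill the
   C-level vtable with function pointers, call PyType_Ready() on the static
   PyTypeObject, store the vtable in the type's tp_dict via __Pyx_SetVtable()
   so cdef methods remain reachable, and register __reduce__/__setstate__
   helpers via __Pyx_setup_reduce() so instances stay picklable where possible. */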
__pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_array.tp_print = 0; #endif if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) __pyx_array_type = &__pyx_type___pyx_array; if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_MemviewEnum.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_memoryview.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) __pyx_memoryview_type = &__pyx_type___pyx_memoryview; __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) #if 
PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_memoryviewslice.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_type_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); /*--- Type import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); /*--- Variable import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); /*--- Function import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } #ifndef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #elif PY_MAJOR_VERSION < 3 #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" void #else #define __Pyx_PyMODINIT_FUNC void #endif #else #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" PyObject * #else #define __Pyx_PyMODINIT_FUNC PyObject * #endif #endif #if PY_MAJOR_VERSION < 3 __Pyx_PyMODINIT_FUNC initcore(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC initcore(void) #else __Pyx_PyMODINIT_FUNC PyInit_core(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC PyInit_core(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { #if PY_VERSION_HEX >= 0x030700A1 static PY_INT64_T main_interpreter_id = -1; PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); if (main_interpreter_id == -1) { main_interpreter_id = current_id; return (unlikely(current_id == -1)) ? 
-1 : 0; } else if (unlikely(main_interpreter_id != current_id)) #else static PyInterpreterState *main_interpreter = NULL; PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; if (!main_interpreter) { main_interpreter = current_interpreter; } else if (unlikely(main_interpreter != current_interpreter)) #endif { PyErr_SetString( PyExc_ImportError, "Interpreter change detected - this module can only be loaded into one interpreter per process."); return -1; } return 0; } static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { if (allow_none || value != Py_None) { result = PyDict_SetItemString(moddict, to_name, value); } Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__Pyx_check_single_interpreter()) return NULL; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static CYTHON_SMALL_CODE int __pyx_pymod_exec_core(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; static PyThread_type_lock __pyx_t_2[8]; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m) { if (__pyx_m == __pyx_pyinit_module) return 0; PyErr_SetString(PyExc_RuntimeError, "Module 'core' has already been imported. 
Re-initialisation is not supported."); return -1; } #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_core(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pxy_PyFrame_Initialize_Offsets __Pxy_PyFrame_Initialize_Offsets(); #endif __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("core", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_b); __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_cython_runtime); if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_monotonic_align__core) { if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "monotonic_align.core")) { if (unlikely(PyDict_SetItemString(modules, "monotonic_align.core", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); (void)__Pyx_modinit_function_export_code(); if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) (void)__Pyx_modinit_type_import_code(); (void)__Pyx_modinit_variable_import_code(); (void)__Pyx_modinit_function_import_code(); /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "monotonic_align/core.pyx":7 * @cython.boundscheck(False) * @cython.wraparound(False) * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< * cdef int x * cdef int y */ __pyx_k_ = (-1e9); /* "monotonic_align/core.pyx":1 * import cython # <<<<<<<<<<<<<< * from cython.parallel import prange * */ __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":209 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_1)) 
__PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":316 * * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ * PyThread_allocate_lock(), */ __pyx_memoryview_thread_locks_used = 0; /* "View.MemoryView":317 * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< * PyThread_allocate_lock(), * PyThread_allocate_lock(), */ __pyx_t_2[0] = PyThread_allocate_lock(); __pyx_t_2[1] = PyThread_allocate_lock(); __pyx_t_2[2] = PyThread_allocate_lock(); __pyx_t_2[3] = PyThread_allocate_lock(); __pyx_t_2[4] = PyThread_allocate_lock(); __pyx_t_2[5] = PyThread_allocate_lock(); __pyx_t_2[6] = PyThread_allocate_lock(); __pyx_t_2[7] = PyThread_allocate_lock(); memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); /* "View.MemoryView":549 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 549, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 549, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":995 * return self.from_object * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 995, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if 
(PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 995, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init monotonic_align.core", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init monotonic_align.core"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* MemviewSliceInit */ static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference) { __Pyx_RefNannyDeclarations int i, retval=-1; Py_buffer *buf = &memview->view; __Pyx_RefNannySetupContext("init_memviewslice", 0); if (unlikely(memviewslice->memview || memviewslice->data)) { PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!"); goto fail; } if (buf->strides) { for (i = 0; i < ndim; i++) { memviewslice->strides[i] = buf->strides[i]; } } else { Py_ssize_t stride = buf->itemsize; for (i = ndim - 1; i >= 0; i--) { memviewslice->strides[i] = stride; stride *= buf->shape[i]; } } for (i = 0; i < ndim; i++) { memviewslice->shape[i] = buf->shape[i]; if (buf->suboffsets) { 
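/* Copy the exporter's per-dimension suboffsets when the buffer provides them;
   otherwise the else branch below stores -1, which in the PEP 3118 buffer
   protocol marks a dimension with no pointer indirection. */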
memviewslice->suboffsets[i] = buf->suboffsets[i]; } else { memviewslice->suboffsets[i] = -1; } } memviewslice->memview = memview; memviewslice->data = (char *)buf->buf; if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { Py_INCREF(memview); } retval = 0; goto no_fail; fail: memviewslice->memview = 0; memviewslice->data = 0; retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } #ifndef Py_NO_RETURN #define Py_NO_RETURN #endif static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN { va_list vargs; char msg[200]; #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); va_end(vargs); Py_FatalError(msg); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (unlikely(!memview || (PyObject *) memview == Py_None)) return; if (unlikely(__pyx_get_slice_count(memview) < 0)) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (unlikely(first_time)) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (unlikely(!memview || (PyObject *) memview == Py_None)) { memslice->memview = NULL; return; } if (unlikely(__pyx_get_slice_count(memview) <= 0)) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = __pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (unlikely(last_time)) { if (have_gil) { Py_CLEAR(memslice->memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); PyGILState_Release(_gilstate); } } else { memslice->memview = NULL; } } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* None */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); } /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, 
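/* as in CPython 2, "raise instance, value" with a separate value is a TypeError */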
"instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must 
not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyObjectCall2Args */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { PyObject *args, *result = NULL; #if 
CYTHON_FAST_PYCALL if (PyFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyFunction_FastCall(function, args, 2); } #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyCFunction_FastCall(function, args, 2); } #endif args = PyTuple_New(2); if (unlikely(!args)) goto done; Py_INCREF(arg1); PyTuple_SET_ITEM(args, 0, arg1); Py_INCREF(arg2); PyTuple_SET_ITEM(args, 1, arg2); Py_INCREF(function); result = __Pyx_PyObject_Call(function, args, NULL); Py_DECREF(args); Py_DECREF(function); done: return result; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if (likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result; #if CYTHON_USE_UNICODE_INTERNALS Py_hash_t hash1, hash2; hash1 = ((PyBytesObject*)s1)->ob_shash; hash2 = ((PyBytesObject*)s2)->ob_shash; if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { return (equals == Py_NE); } #endif result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? 
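/* memcmp() == 0 means the byte strings are identical; map that onto the requested Py_EQ / Py_NE answer */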
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } #if CYTHON_USE_UNICODE_INTERNALS { Py_hash_t hash1, hash2; #if CYTHON_PEP393_ENABLED hash1 = ((PyASCIIObject*)s1)->hash; hash2 = ((PyASCIIObject*)s2)->hash; #else hash1 = ((PyUnicodeObject*)s1)->hash; hash2 = ((PyUnicodeObject*)s2)->hash; #endif if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { goto return_ne; } } #endif kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = __Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? 
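/* result comes from memcmp over length * kind bytes (kind = bytes per code unit) */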
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* None */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { Py_ssize_t q = a / b; Py_ssize_t r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_USE_TYPE_SLOTS #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* GetItemInt */ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyList_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyTuple_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
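/* wraparound: a negative index counts back from the end of the tuple, as in Python */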
i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* ObjectGetItem */ #if CYTHON_USE_TYPE_SLOTS static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { PyObject *runerr; Py_ssize_t key_value; PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; if (unlikely(!(m && m->sq_item))) { PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); return NULL; } key_value = __Pyx_PyIndex_AsSsize_t(index); if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); } if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { PyErr_Clear(); PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); } return NULL; } static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; if (likely(m && m->mp_subscript)) { return m->mp_subscript(obj, key); } return __Pyx_PyObject_GetIndex(obj, key); } #endif /* decode_c_string */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { PyErr_SetString(PyExc_OverflowError, "c-string too long to convert to Python"); return NULL; } length = (Py_ssize_t) slen; if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } if (unlikely(stop <= start)) return __Pyx_NewRef(__pyx_empty_unicode); length = stop - start; cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetAttr3 */ static PyObject *__Pyx_GetAttr3Default(PyObject *d) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; __Pyx_PyErr_Clear(); Py_INCREF(d); return d; } static CYTHON_INLINE PyObject 
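/* three-argument getattr: return the attribute if present, otherwise clear the AttributeError
   and return the default d.  A generated call site looks roughly like (illustrative names only):
       value = __Pyx_GetAttr3(obj, __pyx_n_s_name, Py_None);  */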
*__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { PyObject *r = __Pyx_GetAttr(o, n); return (likely(r)) ? r : __Pyx_GetAttr3Default(d); } /* PyDictVersioning */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { PyObject *dict = Py_TYPE(obj)->tp_dict; return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); #else dictptr = _PyObject_GetDictPtr(obj); #endif } return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; } static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { PyObject *dict = Py_TYPE(obj)->tp_dict; if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) return 0; return obj_dict_version == __Pyx_get_object_dict_version(obj); } #endif /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
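/* pluralize "value" in the unpacking error message */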
"" : "s"); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(__Pyx_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* GetTopmostException */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate) { _PyErr_StackItem *exc_info = tstate->exc_info; while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && exc_info->previous_item != NULL) { exc_info = exc_info->previous_item; } return exc_info; } #endif /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); *type = exc_info->exc_type; *value = exc_info->exc_value; *tb = exc_info->exc_traceback; #else *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; #endif Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = type; exc_info->exc_value = value; exc_info->exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback = local_tb; } #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; 
tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = *type; exc_info->exc_value = *value; exc_info->exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? 
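/* exc_type1 may be NULL when only one candidate type was supplied; skip its subclass check then */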
PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { PyObject *t = PyTuple_GET_ITEM(tuple, i); #if PY_MAJOR_VERSION < 3 if (likely(exc_type == t)) return 1; #endif if (likely(PyExceptionClass_Check(t))) { if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; } else { } } return 0; } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { if (likely(PyExceptionClass_Check(exc_type))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } else if (likely(PyTuple_Check(exc_type))) { return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); } else { } } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { assert(PyExceptionClass_Check(exc_type1)); assert(PyExceptionClass_Check(exc_type2)); if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { (void)inplace; (void)zerodivision_check; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? 
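/* Py_SIZE(op1) == 0 encodes the value 0; |size| == 1 means the magnitude fits in a single digit */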
digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla + llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? 
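/* no integer or float fast path matched: fall back to the generic number protocol */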
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); } #endif /* None */ static CYTHON_INLINE long __Pyx_div_long(long a, long b) { long q = a / b; long r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* HasAttr */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { PyObject *r; if (unlikely(!__Pyx_PyBaseString_Check(n))) { PyErr_SetString(PyExc_TypeError, "hasattr(): attribute name must be string"); return -1; } r = __Pyx_GetAttr(o, n); if (unlikely(!r)) { PyErr_Clear(); return 0; } else { Py_DECREF(r); return 1; } } /* PyObject_GenericGetAttrNoDict */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { PyErr_Format(PyExc_AttributeError, #if PY_MAJOR_VERSION >= 3 "'%.50s' object has no attribute '%U'", tp->tp_name, attr_name); #else "'%.50s' object has no attribute '%.400s'", tp->tp_name, PyString_AS_STRING(attr_name)); #endif return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { PyObject *descr; PyTypeObject *tp = Py_TYPE(obj); if (unlikely(!PyString_Check(attr_name))) { return PyObject_GenericGetAttr(obj, attr_name); } assert(!tp->tp_dictoffset); descr = _PyType_Lookup(tp, attr_name); if (unlikely(!descr)) { return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); } Py_INCREF(descr); #if PY_MAJOR_VERSION < 3 if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) #endif { descrgetfunc f = Py_TYPE(descr)->tp_descr_get; if (unlikely(f)) { PyObject *res = f(descr, obj, (PyObject *)tp); Py_DECREF(descr); return res; } } return descr; } #endif /* PyObject_GenericGetAttr */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { return PyObject_GenericGetAttr(obj, attr_name); } return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); } #endif /* SetVTable */ static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } /* PyObjectGetAttrStrNoError */ static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) __Pyx_PyErr_Clear(); } static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { PyObject *result; #if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1 PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); } #endif result = __Pyx_PyObject_GetAttrStr(obj, attr_name); if (unlikely(!result)) { __Pyx_PyObject_GetAttrStr_ClearAttributeError(); } 
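/* only a pending AttributeError is cleared above; any other exception stays set and propagates with NULL */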
return result; } /* SetupReduce */ static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { int ret; PyObject *name_attr; name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); if (likely(name_attr)) { ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); } else { ret = -1; } if (unlikely(ret < 0)) { PyErr_Clear(); ret = 0; } Py_XDECREF(name_attr); return ret; } static int __Pyx_setup_reduce(PyObject* type_obj) { int ret = 0; PyObject *object_reduce = NULL; PyObject *object_reduce_ex = NULL; PyObject *reduce = NULL; PyObject *reduce_ex = NULL; PyObject *reduce_cython = NULL; PyObject *setstate = NULL; PyObject *setstate_cython = NULL; #if CYTHON_USE_PYTYPE_LOOKUP if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #else if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #endif #if CYTHON_USE_PYTYPE_LOOKUP object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #else object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #endif reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; if (reduce_ex == object_reduce_ex) { #if CYTHON_USE_PYTYPE_LOOKUP object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #else object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #endif reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD; if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); if (likely(reduce_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (reduce == object_reduce || PyErr_Occurred()) { goto __PYX_BAD; } setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); if (!setstate) PyErr_Clear(); if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); if (likely(setstate_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (!setstate || PyErr_Occurred()) { goto __PYX_BAD; } } PyType_Modified((PyTypeObject*)type_obj); } } goto __PYX_GOOD; __PYX_BAD: if (!PyErr_Occurred()) PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); ret = -1; __PYX_GOOD: #if !CYTHON_USE_PYTYPE_LOOKUP Py_XDECREF(object_reduce); Py_XDECREF(object_reduce_ex); #endif Py_XDECREF(reduce); Py_XDECREF(reduce_ex); Py_XDECREF(reduce_cython); Py_XDECREF(setstate); Py_XDECREF(setstate_cython); return ret; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject 
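/* pointer to the cython_runtime module's dict slot, consulted for the cline_in_traceback flag */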
**cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { __PYX_PY_DICT_LOOKUP_IF_MODIFIED( use_cline, *cython_runtime_dict, __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* 
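/* fabricate a minimal code object (no bytecode, just filename/function name/line number) so a
   C-level failure can be recorded in the Python traceback.  Generated error paths typically call,
   with illustrative literals:
       __Pyx_AddTraceback("module.function", __pyx_clineno, __pyx_lineno, __pyx_filename);  */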
__Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if ((0)) {} view->obj = NULL; Py_DECREF(obj); } #endif /* MemviewSliceIsContig */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) { int i, index, step, start; Py_ssize_t itemsize = mvs.memview->view.itemsize; if (order == 'F') { step = 1; start = 0; } else { step = -1; start = ndim - 1; } for (i = 0; i < ndim; i++) { index = start + step * i; if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) return 0; itemsize *= mvs.shape[index]; } return 1; } /* OverlappingSlices */ static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice, void **out_start, void **out_end, int ndim, size_t itemsize) { char *start, *end; int i; start = end = slice->data; for (i = 0; i < ndim; i++) { Py_ssize_t stride = slice->strides[i]; Py_ssize_t extent = 
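/* per dimension: a positive stride pushes the end of the extent upward, a negative stride pulls the start downward */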
slice->shape[i]; if (extent == 0) { *out_start = *out_end = start; return; } else { if (stride > 0) end += stride * (extent - 1); else start += stride * (extent - 1); } } *out_start = start; *out_end = end + itemsize; } static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize) { void *start1, *end1, *start2, *end2; __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); return (start1 < end2) && (start2 < end1); } /* Capsule */ static CYTHON_INLINE PyObject * __pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) { PyObject *cobj; #if PY_VERSION_HEX >= 0x02070000 cobj = PyCapsule_New(p, sig, NULL); #else cobj = PyCObject_FromVoidPtr(p, NULL); #endif return cobj; } /* IsLittleEndian */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) { union { uint32_t u32; uint8_t u8[4]; } S; S.u32 = 0x01020304; return S.u8[0] == 4; } /* BufferFormatCheck */ static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t <= '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case '?': return "'bool'"; case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 
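/* a standard-size complex double is two 8-byte doubles */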
16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably be the same as above, but we don't have any guarantees.
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case '?': case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number, ndim; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ndim = ctx->head->field->type->ndim; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { 
__Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } CYTHON_FALLTHROUGH; case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) && (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } CYTHON_FALLTHROUGH; case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* TypeInfoCompare */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return 
!a->fields[i].type && !b->fields[i].type; } } return 1; } /* MemviewSliceValidateAndInit */ static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (unlikely(buf->strides[dim] != sizeof(void *))) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (unlikely(buf->strides[dim] != buf->itemsize)) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (unlikely(stride < buf->itemsize)) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } } else { if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (unlikely(buf->suboffsets)) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", dim); goto fail; } } if (spec & __Pyx_MEMVIEW_PTR) { if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 1; i >- 1; i--) { if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous."); goto fail; } stride = stride * buf->shape[i]; } } return 1; fail: return 0; } static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj) { struct __pyx_memoryview_obj *memview, *new_memview; __Pyx_RefNannyDeclarations Py_buffer *buf; int i, spec = 0, retval = -1; __Pyx_BufFmt_Context ctx; int from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if 
(unlikely(buf->ndim != ndim)) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto fail; } if (new_memview) { __Pyx_BufFmt_Init(&ctx, stack, dtype); if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail; } if (unlikely((unsigned) buf->itemsize != dtype->size)) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } if (buf->len > 0) { for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (unlikely(!__pyx_check_strides(buf, i, ndim, spec))) goto fail; if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec))) goto fail; } if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))) goto fail; } if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3, &__Pyx_TypeInfo_int, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3, &__Pyx_TypeInfo_float, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, &__Pyx_TypeInfo_int, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* CIntToPy */ static 
CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* MemviewSliceCopyTemplate */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object) { __Pyx_RefNannyDeclarations int i; __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_memoryview_obj *from_memview = from_mvs->memview; Py_buffer *buf = &from_memview->view; PyObject *shape_tuple = NULL; PyObject *temp_int = NULL; struct __pyx_array_obj *array_obj = NULL; struct __pyx_memoryview_obj *memview_obj = NULL; __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); for (i = 0; i < ndim; i++) { if (unlikely(from_mvs->suboffsets[i] >= 0)) { PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " "indirect dimensions (axis %d)", i); goto fail; } } shape_tuple = PyTuple_New(ndim); if (unlikely(!shape_tuple)) { goto fail; } 
__Pyx_GOTREF(shape_tuple); for(i = 0; i < ndim; i++) { temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); if(unlikely(!temp_int)) { goto fail; } else { PyTuple_SET_ITEM(shape_tuple, i, temp_int); temp_int = NULL; } } array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); if (unlikely(!array_obj)) { goto fail; } __Pyx_GOTREF(array_obj); memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( (PyObject *) array_obj, contig_flag, dtype_is_object, from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, 
PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && 
!PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, 
sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char 
*bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntFromPy */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * 
sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) 
return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) 
{ *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). " "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? 
digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
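/* Illustrative note, not part of the generated module: the __Pyx_BufFmt_*
   helpers above validate PEP 3118 buffer format strings against the expected
   dtype.  A few examples of strings that grammar covers (hypothetical, shown
   only to make the parser's cases concrete):

       "i"             a single native int
       "3f"            three packed floats (repeat count)
       "Zd"            a complex double ('Z' marks a complex prefix)
       "T{i:x:d:y:}"   a struct with an int field 'x' and a double field 'y'
       "(2,3)d"        a 2x3 array of doubles
*/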
LBL.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
#include <immintrin.h>

int binary_search_right_boundary_kernel(const int *row_pointer,
                                        const int key_input,
                                        const int size)
{
    int start = 0;
    int stop = size - 1;
    int median;
    int key_median;
    while (stop >= start)
    {
        median = (stop + start) / 2;
        key_median = row_pointer[median];
        if (key_input >= key_median)
            start = median + 1;
        else
            stop = median - 1;
    }
    return start;
}

void Dot_Product_Avx2_dLBL(int len, const int *indx, const double *Val,
                           const double *X, double *res)
{
    const int *colIndPtr = indx;
    const double *matValPtr = (double *) Val;
    const double *x = (double *) X;
    int j;
    double result = 0.0;
    __m256d vec_y = _mm256_setzero_pd();
    int nnzThisLine = len;
    int k_iter = nnzThisLine / 4;
    int k_rem = nnzThisLine % 4;

    // Loop in multiples of 4 non-zeroes
    for (j = 0; j < k_iter; j++)
    {
        vec_y = _mm256_fmadd_pd(*((__m256d_u *) (matValPtr)),
                                _mm256_set_pd(x[*(colIndPtr + 3)],
                                              x[*(colIndPtr + 2)],
                                              x[*(colIndPtr + 1)],
                                              x[*(colIndPtr)]),
                                vec_y);
        matValPtr += 4;
        colIndPtr += 4;
    }

    // Horizontal addition
    if (k_iter)
    {
        // sum[0] += sum[1] ; sum[2] += sum[3]
        vec_y = _mm256_hadd_pd(vec_y, vec_y);
        // Cast avx_sum to 128 bit to obtain sum[0] and sum[1]
        __m128d sum_lo = _mm256_castpd256_pd128(vec_y);
        // Extract 128 bits to obtain sum[2] and sum[3]
        __m128d sum_hi = _mm256_extractf128_pd(vec_y, 1);
        // Add remaining two sums
        __m128d sse_sum = _mm_add_pd(sum_lo, sum_hi);
        // Store result
        result = sse_sum[0];
    }

    // Remainder loop for nnzThisLine % 4
    for (j = 0; j < k_rem; j++)
    {
        result += *matValPtr++ * x[*colIndPtr++];
    }
    *(double *) res = result;
}

//int main(int argc, char ** argv)
int spmvLBL(int m, int n, int nnzR, int *RowPtr, int *ColIdx, double *Val,
            char *filename, double *GFlops_LBL, double *Time_LBL,
            double *time_pre, double *LBL_error)
{
    //char *filename = argv[1];
    //printf ("filename = %s\n", filename);
    //read matrix
    //int m, n, nnzR, isSymmetric;
    //mmio_info(&m, &n, &nnzR, &isSymmetric, filename);
    //int *RowPtr = (int *)malloc((m+1) * sizeof(int));
    //int *ColIdx = (int *)malloc(nnzR * sizeof(int));
    //double *Val = (double *)malloc(nnzR * sizeof(double));
    //mmio_data(RowPtr, ColIdx, Val, filename);
    for (int i = 0; i < nnzR; i++)
        Val[i] = 1;

    // create X, Y, Y_golden
    double *X = (double *)malloc(sizeof(double) * (n + 1));
    double *Y = (double *)malloc(sizeof(double) * (m + 1));
    double *Y_golden = (double *)malloc(sizeof(double) * (m + 1));
    memset(X, 0, sizeof(double) * (n + 1));
    memset(Y, 0, sizeof(double) * (m + 1));
    memset(Y_golden, 0, sizeof(double) * (m + 1));
    for (int i = 0; i < n; i++)
        X[i] = 1;
    for (int i = 0; i < m; i++)
        for (int j = RowPtr[i]; j < RowPtr[i + 1]; j++)
            Y_golden[i] += Val[j] * X[ColIdx[j]];

    //int nthreads = atoi(argv[2]);
    //omp_set_num_threads(nthreads);
    int nthreads = omp_get_max_threads();
    //int iter = atoi(argv[3]);
    //printf("#iter is %i \n", iter);
    int iter = 500;

    struct timeval t1, t2;
    gettimeofday(&t1, NULL);

    // find balanced points
    int *csrSplitter = (int *)malloc((nthreads + 1) * sizeof(int));
    //int *csrSplitter_normal = (int *)malloc((nthreads+1) * sizeof(int));
    int stridennz = ceil((double)nnzR / (double)nthreads);

    //#pragma omp parallel for
    for (int tid = 0; tid <= nthreads; tid++)
    {
        // compute partition boundaries by partition of size stride
        int boundary = tid * stridennz;
        // clamp partition boundaries to [0, nnzR]
        boundary = boundary > nnzR ? nnzR : boundary;
        // binary search
        csrSplitter[tid] = binary_search_right_boundary_kernel(RowPtr, boundary, m + 1) - 1;
    }
    csrSplitter[0] = 0;

    //#pragma omp parallel for
    for (int tid = 1; tid <= nthreads; tid++)
    {
        // compute partition boundaries by partition of size stride
        int boundary = tid * stridennz;
        // clamp partition boundaries to [0, nnzR]
        boundary = boundary > nnzR ? nnzR : boundary;
        // binary search
        int spl = binary_search_right_boundary_kernel(RowPtr, boundary, m + 1) - 1;
        if (spl == csrSplitter[tid - 1])
        {
            spl = m > (spl + 1) ? (spl + 1) : m;
            csrSplitter[tid] = spl;
        }
        else
        {
            csrSplitter[tid] = spl;
        }
    }
    gettimeofday(&t2, NULL);
    double time_balanced_pre = ((t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0);
    //printf("time_balanced_pre = %f\n", time_balanced_pre);
    time_pre[1] = time_balanced_pre;

    /*
    //-----------------------------------parallel_omp_balanced-------------------------------------
    gettimeofday(&t1, NULL);
    int currentiter = 0;
    for (currentiter = 0; currentiter < iter; currentiter++)
    {
        #pragma omp parallel for
        for (int tid = 0; tid < nthreads; tid++)
        {
            for (int u = csrSplitter[tid]; u < csrSplitter[tid+1]; u++)
            {
                double sum = 0;
                for (int j = RowPtr[u]; j < RowPtr[u + 1]; j++)
                {
                    sum += Val[j] * X[ColIdx[j]];
                }
                Y[u] = sum;
            }
        }
    }
    gettimeofday(&t2, NULL);
    double time_balanced = ((t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0) / iter;
    double GFlops_balanced = 2 * nnzR / time_balanced / pow(10,6);
    int errorcount_balanced = 0;
    for (int i = 0; i < m; i++)
        if (Y[i] != Y_golden[i])
            errorcount_balanced++;
    //printf("time_balanced = %f\n", time_balanced);
    //printf("errorcount_balanced = %i\n", errorcount_balanced);
    //printf("GFlops_balanced = %f\n", GFlops_balanced);
    GFlops_LBL[0] = GFlops_balanced;
    Time_LBL[0] = time_balanced;
    LBL_error[0] = errorcount_balanced;
    //------------------------------------------------------------------------
    */

    //------------------------------------parallel_omp_balanced_avx2------------------------------------
    int currentiter = 0;
    gettimeofday(&t1, NULL);
    for (currentiter = 0; currentiter < iter; currentiter++)
    {
        #pragma omp parallel for
        for (int tid = 0; tid < nthreads; tid++)
        {
            for (int u = csrSplitter[tid]; u < csrSplitter[tid + 1]; u++)
            {
                // pass the row's own segment of ColIdx and Val
                Dot_Product_Avx2_dLBL(RowPtr[u + 1] - RowPtr[u], ColIdx + RowPtr[u],
                                      Val + RowPtr[u], X, Y + u);
            }
        }
    }
    gettimeofday(&t2, NULL);
    double time_balanced_avx = ((t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0) / iter;
    double GFlops_balanced_avx = 2 * nnzR / time_balanced_avx / pow(10,6);
    int errorcount_balanced_avx = 0;
    for (int i = 0; i < m; i++)
        if (Y[i] != Y_golden[i])
            errorcount_balanced_avx++;
    //printf("time_balanced_avx = %f\n", time_balanced_avx);
    //printf("errorcount_balanced_avx = %i\n", errorcount_balanced_avx);
    //printf("GFlops_balanced_avx = %f\n", GFlops_balanced_avx);
    GFlops_LBL[1] = GFlops_balanced_avx;
    Time_LBL[1] = time_balanced_avx;
    LBL_error[1] = errorcount_balanced_avx;
    //------------------------------------------------------------------------

    free(X);
    free(Y);
    free(Y_golden);
    free(csrSplitter);
    return 0;
}
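/*
 * Minimal driver sketch (illustrative): builds a tiny 3x3 CSR matrix by hand
 * and calls spmvLBL().  The guard macro LBL_EXAMPLE_MAIN and the literal data
 * are made up for this example; only slot [1] of each metric array (the AVX2
 * path) is written by the code above, and spmvLBL overwrites Val with all
 * ones before running.
 */
#ifdef LBL_EXAMPLE_MAIN
int main(void)
{
    int RowPtr[4] = {0, 2, 3, 5};        // 3x3 matrix, 5 non-zeros
    int ColIdx[5] = {0, 2, 1, 0, 2};
    double Val[5] = {1, 1, 1, 1, 1};
    double gflops[2] = {0}, time_ms[2] = {0}, time_pre_ms[2] = {0}, err[2] = {0};
    char fname[] = "example";

    spmvLBL(3, 3, 5, RowPtr, ColIdx, Val, fname,
            gflops, time_ms, time_pre_ms, err);
    printf("AVX2 path: %.3f GFlop/s, %.3f ms, %d wrong entries\n",
           gflops[1], time_ms[1], (int)err[1]);
    return 0;
}
#endif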
depth_to_space.h
// Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef MACE_KERNELS_DEPTH_TO_SPACE_H_
#define MACE_KERNELS_DEPTH_TO_SPACE_H_

#include <memory>
#include <vector>

#include "mace/core/future.h"
#include "mace/core/tensor.h"
#include "mace/kernels/kernel.h"
#include "mace/public/mace.h"

#ifdef MACE_ENABLE_OPENCL
#include "mace/core/runtime/opencl/cl2_header.h"
#endif  // MACE_ENABLE_OPENCL

namespace mace {
namespace kernels {

template<DeviceType D, typename T>
struct DepthToSpaceOpFunctor : OpKernel {
  DepthToSpaceOpFunctor(OpKernelContext *context, const int block_size)
      : OpKernel(context), block_size_(block_size) {}

  MaceStatus operator()(const Tensor *input,
                        Tensor *output,
                        StatsFuture *future) {
    MACE_UNUSED(future);
    const index_t batch_size = input->dim(0);
    const index_t input_depth = input->dim(1);
    const index_t input_height = input->dim(2);
    const index_t input_width = input->dim(3);

    MACE_CHECK(input_depth % (block_size_ * block_size_) == 0,
               "input depth should be divisible by block_size * block_size",
               input_depth);

    const index_t output_depth = input_depth / (block_size_ * block_size_);
    const index_t output_width = input_width * block_size_;
    const index_t output_height = input_height * block_size_;

    std::vector<index_t> output_shape = {batch_size, output_depth,
                                         output_height, output_width};
    MACE_RETURN_IF_ERROR(output->Resize(output_shape));

    Tensor::MappingGuard logits_guard(input);
    Tensor::MappingGuard output_guard(output);
    const T *input_ptr = input->data<T>();
    T *output_ptr = output->mutable_data<T>();

#pragma omp parallel for
    for (index_t b = 0; b < batch_size; ++b) {
      for (index_t d = 0; d < output_depth; ++d) {
        for (index_t h = 0; h < output_height; ++h) {
          const index_t in_h = h / block_size_;
          const index_t offset_h = (h % block_size_);
          for (index_t w = 0; w < output_width; ++w) {
            const index_t in_w = w / block_size_;
            const index_t offset_w = w % block_size_;
            const index_t offset_d =
                (offset_h * block_size_ + offset_w) * output_depth;
            const index_t in_d = d + offset_d;

            const index_t o_index =
                ((b * output_depth + d) * output_height + h) * output_width + w;
            const index_t i_index =
                ((b * input_depth + in_d) * input_height + in_h) * input_width
                    + in_w;
            output_ptr[o_index] = input_ptr[i_index];
          }
        }
      }
    }

    return MACE_SUCCESS;
  }

  const int block_size_;
};

#ifdef MACE_ENABLE_OPENCL
class OpenCLDepthToSpaceKernel {
 public:
  virtual MaceStatus Compute(
      OpKernelContext *context,
      const Tensor *input,
      Tensor *output,
      StatsFuture *future) = 0;
  MACE_VIRTUAL_EMPTY_DESTRUCTOR(OpenCLDepthToSpaceKernel);
};

template<typename T>
struct DepthToSpaceOpFunctor<DeviceType::GPU, T> : OpKernel {
  DepthToSpaceOpFunctor(OpKernelContext *context, const int block_size);

  MaceStatus operator()(const Tensor *input,
                        Tensor *output,
                        StatsFuture *future);

  std::unique_ptr<OpenCLDepthToSpaceKernel> kernel_;
};
#endif  // MACE_ENABLE_OPENCL

}  // namespace kernels
}  // namespace mace

#endif  // MACE_KERNELS_DEPTH_TO_SPACE_H_
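// Worked example (illustrative, independent of the MACE types above): with
// block_size = 2, an NCHW input of shape [1, 4, 1, 1] maps to an output of
// shape [1, 1, 2, 2]; using the same index arithmetic as operator() above,
// input channel c lands at output pixel (h, w) = (c / 2, c % 2).
#if 0  // sketch only, not part of the kernel
inline void DepthToSpaceToyExample() {
  const int block = 2, out_depth = 1;
  const int input[4] = {10, 20, 30, 40};  // NCHW [1, 4, 1, 1]
  int output[2][2];                       // NCHW [1, 1, 2, 2]
  for (int h = 0; h < 2; ++h) {
    for (int w = 0; w < 2; ++w) {
      const int in_d = ((h % block) * block + (w % block)) * out_depth;  // = h * 2 + w
      output[h][w] = input[in_d];  // yields {{10, 20}, {30, 40}}
    }
  }
}
#endif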
delete_inf_refcount.c
// RUN: %libomptarget-compile-run-and-check-generic #include <stdio.h> #include <omp.h> #pragma omp declare target int isHost; #pragma omp end declare target int main(void) { isHost = -1; #pragma omp target enter data map(to: isHost) #pragma omp target { isHost = omp_is_initial_device(); } #pragma omp target update from(isHost) if (isHost < 0) { printf("Runtime error, isHost=%d\n", isHost); } #pragma omp target exit data map(delete: isHost) // CHECK: Target region executed on the device printf("Target region executed on the %s\n", isHost ? "host" : "device"); return isHost; }
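As the file name suggests, isHost is a declare-target variable, so its device copy has an effectively infinite reference count and the exit data map(delete:) clause is expected to leave it intact; the test checks that nothing breaks. For contrast, here is a minimal sketch of the ordinary, finite-reference-count case, where delete really does unmap the variable. omp_get_default_device() and omp_target_is_present() are standard OpenMP runtime calls; note that without an offload device both printouts report 1, since everything is "present" on the host device.

#include <omp.h>
#include <stdio.h>

int main(void)
{
    int x = 42;                                 /* ordinary variable, finite refcount */
    int dev = omp_get_default_device();

    #pragma omp target enter data map(to: x)    /* create and populate the device copy */
    printf("present after enter data: %d\n", omp_target_is_present(&x, dev));

    #pragma omp target exit data map(delete: x) /* removes the device copy for finite-refcount mappings */
    printf("present after delete:     %d\n", omp_target_is_present(&x, dev));
    return 0;
}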
ConvolutionRules.h
// Copyright 2016-present, Facebook, Inc. // All rights reserved. // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #ifndef CONVOLUTIONRULES_H #define CONVOLUTIONRULES_H #include <cuda_runtime_api.h> #include <cuda.h> #include "RectangularRegions.h" #include "Metadata.h" #include <algorithm> // only supports 3D convolutions now, and will be modified in the future maybe. // coordinate exchange template <Int dimension> void Convolution_InputSgToRulesAndOutputSg(SparseGrid<dimension> &inputGrid, SparseGrid<dimension> &outputGrid, RuleBook &rules, long *size, long *stride, long *inputSpatialSize, long *outputSpatialSize, const std::vector<Float3> &input_normal, std::vector<Float3> &output_normal) { EASY_FUNCTION(profiler::colors::Green100); //right hand coordinate, int index[6*8] = {0,1,2,3,4,5,6,7, 6,7,4,5,2,3,0,1, 2,3,6,7,0,1,4,5, 4,5,0,1,6,7,2,3, 1,5,3,7,0,4,2,6, 4,0,6,2,5,1,7,3}; Int outputStart = outputGrid.ctr; std::vector<Int> inputObservations = std::vector<Int>(inputGrid.mp.size()); // orientation is determined by the first one? RuleBook candidate_rules = std::vector<std::vector<Int>>(volume<dimension>(size)); for(int i = 0; i < volume<dimension>(size);i++) { candidate_rules[i].reserve(inputGrid.mp.size()); } for (auto const &inIter : inputGrid.mp) { auto outRegion = OutputRegionCalculator<dimension>( inIter.first, size, stride, outputSpatialSize); // printf("new output\r\n\r\n"); for (auto j : outRegion) { auto inRegion = InputRegionCalculator<dimension>(j, size, stride); Int rulesOffset = inRegion.offset(inIter.first); auto outIter = outputGrid.mp.find(j); if (outIter == outputGrid.mp.end()) { outIter = outputGrid.mp.insert(std::make_pair(j, outputGrid.ctr++)).first; inputObservations[outIter->second - outputStart ] = 0; output_normal.push_back(Float3(0,0,0)); } inputObservations[outIter->second - outputStart ]++; output_normal[outIter->second] += input_normal[inIter.second+inputGrid.ctr]; // find mapping based on // printf("output point: %d %d %d %d %d\r\n",j[0],j[1],j[2], rulesOffset, outIter->second); candidate_rules[rulesOffset].push_back(inIter.second + inputGrid.ctr); candidate_rules[rulesOffset].push_back(outIter->second); } } std::vector<Int> oriented_index = std::vector<Int>(outputGrid.ctr - outputStart ); for(int i = outputStart ; i < outputGrid.ctr; i++) { output_normal[i] /= inputObservations[i - outputStart ]; output_normal[i].normalize(); oriented_index[i - outputStart ] = OrientedFilter(output_normal[i]); } rules.resize(volume<dimension>(size)); // there might be some problems here // target: low level texture information && high level geometry information // minimize the rotation invariance for(int i = 0; i < candidate_rules.size();i++) { for(int k = 0; k < candidate_rules[i].size();k+=2) { int ori_index = index[oriented_index[candidate_rules[i][k+1] - outputStart ] * 8 + i]; #if 0 rules[i].push_back(candidate_rules[i][k]); rules[i].push_back(candidate_rules[i][k+1]); #else rules[ori_index].push_back(candidate_rules[i][k]); rules[ori_index].push_back(candidate_rules[i][k+1]); #endif } } } template <Int dimension> void Convolution_InputSgToRulesAndOutputSg(SparseGrid<dimension> &inputGrid, SparseGrid<dimension> &outputGrid, RuleBook &rules, long *size, long *stride, long *inputSpatialSize, long *outputSpatialSize) { rules.resize(volume<dimension>(size)); for (auto const &inIter : inputGrid.mp) { auto outRegion = OutputRegionCalculator<dimension>( inIter.first, size, stride, 
outputSpatialSize); for (auto j : outRegion) { auto inRegion = InputRegionCalculator<dimension>(j, size, stride); Int rulesOffset = inRegion.offset(inIter.first); auto outIter = outputGrid.mp.find(j); if (outIter == outputGrid.mp.end()) { outIter = outputGrid.mp.insert(std::make_pair(j, outputGrid.ctr++)).first; } rules[rulesOffset].push_back(inIter.second + inputGrid.ctr); rules[rulesOffset].push_back(outIter->second); } } } // GPU #ifdef GPU_GRID // only supports 3D convolutions now, and will be modified in the future maybe. // coordinate exchange // maybe 3x3 is a better choice, but just a little slower, need to check in the future void dGenerateSpatialNewPoint (Int* d_prev_all_point, // size = num_active_point * dim Int* d_next_all_point, // size = num_active_point * maxi_sizec * dim long* d_size, long* d_stride, long* d_output_spatial_size, Int num_active_point, long maxi_sizec, Int ndim); template <Int dimension> void Convolution_InputSgToRulesAndOutputSg(GPU_SparseGrid<dimension> &gpu_inputGrid, GPU_SparseGrid<dimension> &gpu_outputGrid, RuleBook &rules, long *size, long *stride, long *inputSpatialSize, long *outputSpatialSize, const std::vector<Float3> &input_normal, std::vector<Float3> &output_normal) { EASY_FUNCTION(profiler::colors::Green50); EASY_BLOCK("gen_outpoint"); Int nActiveInput = gpu_inputGrid.pHash->getCompactingSize(); rules.resize(volume<dimension>(size)); Int* in_points_flat = new Int[nActiveInput * dimension]; // size = nActiveInput * dimension gpuErrchk(cudaMemcpy(in_points_flat, gpu_inputGrid.pHash->getAllPoints(), sizeof(Int) * nActiveInput * dimension, cudaMemcpyDeviceToHost)); Point<dimension> p; Points<dimension> out_points; Ints out_index; EASY_BLOCK("transformat"); for (Int i = 0; i < nActiveInput; i++) { for(Int k = 0; k < dimension; k++) { p[k] = in_points_flat[i + k * nActiveInput]; } auto outRegion = OutputRegionCalculator<dimension>( p, size, stride, outputSpatialSize); for (auto j : outRegion) { out_points.push_back(j); } } EASY_END_BLOCK; EASY_END_BLOCK; EASY_BLOCK("insert retrieve"); gpu_outputGrid.pHash->insert_points(out_points); gpu_outputGrid.pHash->retrieve_points(out_points, out_index); EASY_END_BLOCK; EASY_BLOCK("Normal"); Int query_index = 0; //right hand coordinate, int index[6*8] = {0,1,2,3,4,5,6,7, 6,7,4,5,2,3,0,1, 2,3,6,7,0,1,4,5, 4,5,0,1,6,7,2,3, 1,5,3,7,0,4,2,6, 4,0,6,2,5,1,7,3}; std::vector<Int> inputObservations = std::vector<Int>(gpu_outputGrid.pHash->size); output_normal.resize(gpu_outputGrid.ctr + gpu_outputGrid.pHash->size); for (Int i = 0; i < nActiveInput; i++) { for(Int k = 0; k < dimension; k++) { p[k] = in_points_flat[i + k * nActiveInput]; } auto outRegion = OutputRegionCalculator<dimension>( p, size, stride, outputSpatialSize); for (auto j : outRegion) { inputObservations[out_index[query_index]]++; output_normal[gpu_outputGrid.ctr + out_index[query_index]] += input_normal[i + gpu_inputGrid.ctr]; query_index+=1; } } for(int k = 0; k < gpu_outputGrid.pHash->size;k++) { if(inputObservations[k] > 0) { output_normal[gpu_outputGrid.ctr + k] /= inputObservations[k]; } } EASY_END_BLOCK; EASY_BLOCK("query"); query_index = 0; for (Int i = 0; i < nActiveInput; i++) { for(Int k = 0; k < dimension; k++) { p[k] = in_points_flat[i + k * nActiveInput]; } auto outRegion = OutputRegionCalculator<dimension>( p, size, stride, outputSpatialSize); for (auto j : outRegion) { auto inRegion = InputRegionCalculator<dimension>(j, size, stride); Int rulesOffset = inRegion.offset(p); Int oriIndex = OrientedFilter(output_normal[gpu_outputGrid.ctr + 
out_index[query_index++]]); Int newRuleOffset = index[oriIndex * 8 + rulesOffset]; rules[newRuleOffset].push_back(i + gpu_inputGrid.ctr); rules[newRuleOffset].push_back(gpu_outputGrid.ctr + out_index[query_index++]); } } gpu_outputGrid.ctr += gpu_outputGrid.pHash->size; delete[] in_points_flat; EASY_END_BLOCK; } // utils to debug template <Int dimension> bool point_cmp(Point<dimension> a,Point<dimension> b) { for(Int i=0;i<dimension;i++) { if(a[i]<b[i])return true; if(a[i]>b[i])return false; } return false; } template <Int dimension> void point_sort(vector<Point<dimension>> &a) { sort(a.begin(),a.end(),point_cmp<dimension>); } // only support when size == stride case template <Int dimension> void Convolution_InputSgToRulesAndOutputSg(GPU_SparseGrid<dimension> &gpu_inputGrid, GPU_SparseGrid<dimension> &gpu_outputGrid, RuleBook &rules, long *size, long *stride, long *inputSpatialSize, long *outputSpatialSize) { // Note that this is a special case for downscaling EASY_FUNCTION(profiler::colors::Green100); for(int i = 0; i < dimension; i++) { // printf("convolution kernel: %d %d %d\r\n", i,size[i], stride[i]); assert(size[i] == 2); assert(stride[i] == 2); } rules.resize(volume<dimension>(size)); Int nActiveInput = gpu_inputGrid.pHash->getCompactingSize(); Int* in_points_flat = new Int[nActiveInput * dimension]; // size = nActiveInput * dimension gpuErrchk(cudaMemcpy(in_points_flat, gpu_inputGrid.pHash->getAllPoints(), sizeof(Int) * nActiveInput * dimension, cudaMemcpyDeviceToHost)); // rewrite the rules generation part to get faster implementation Point<dimension> p; Points<dimension> out_points; Ints out_index; // note that this is a mapping from in_points to out_points for (Int i = 0; i < nActiveInput; i++) { for(Int k = 0; k < dimension; k++) { p[k] = in_points_flat[i + k * nActiveInput]; } auto outRegion = OutputRegionCalculator<dimension>( p, size, stride, outputSpatialSize); for (auto j : outRegion) { out_points.push_back(j); } } gpu_outputGrid.pHash->insert_points(out_points); gpu_outputGrid.pHash->retrieve_points(out_points, out_index); Int query_index = 0; for (Int i = 0; i < nActiveInput; i++) { for(Int k = 0; k < dimension; k++) { p[k] = in_points_flat[i + k * nActiveInput]; } auto outRegion = OutputRegionCalculator<dimension>( p, size, stride, outputSpatialSize); for (auto j : outRegion) { auto inRegion = InputRegionCalculator<dimension>(j, size, stride); Int rulesOffset = inRegion.offset(p); // printf("%d %d %d %d %d %d %d\r\n", p[0],p[1],p[2],j[0],j[1],j[2],rulesOffset); rules[rulesOffset].push_back(i + gpu_inputGrid.ctr); rules[rulesOffset].push_back(gpu_outputGrid.ctr + out_index[query_index++]); } } gpu_outputGrid.ctr += gpu_outputGrid.pHash->size; delete[] in_points_flat; } void d_Convolution_GenerateOutputRules(uint32_t * d_in_points, uint32_t * d_output_points, uint32_t * d_output_index, RuleBook &rules,Int num, Int dimension, Int filterSize, Int input_offset); at::Tensor FlatPoints(const at::Tensor &input_points); template <Int dimension> at::Tensor ResolutionBasedScatteringCuda(at::Tensor &points_lr, at::Tensor &points_hr, Int stride) { assert(dimension == points_lr.size(1)); int lr_point_num = points_lr.size(0); int hr_point_num = points_hr.size(0); GPU_SparseGrid<dimension> gpu_grid; at::Tensor point_lr_flat = FlatPoints(points_lr); at::Tensor point_hr_flat = FlatPoints(points_hr); at::Tensor point_hr_query = point_hr_flat / stride; at::Tensor hr2lr = torch::empty({hr_point_num}, at::CUDA(at_kINT)); gpu_grid.pHash->insert((uint32_t* )point_lr_flat.data<Int>(), 
lr_point_num); gpu_grid.pHash->retrieve((uint32_t* )point_hr_query.data<Int>(), (uint32_t* )hr2lr.data<Int>(),hr_point_num); return hr2lr; } template <Int dimension> void Convolution_InputSgToRulesAndOutputSg_FastDownSampleMode(GPU_SparseGrid<dimension> &gpu_inputGrid, GPU_SparseGrid<dimension> &gpu_outputGrid, RuleBook &rules, long *size, long *stride, long *inputSpatialSize, long *outputSpatialSize) { // Note that this is a special case for downscaling EASY_FUNCTION(profiler::colors::Green100); for(int i = 0; i < dimension; i++) { // printf("convolution kernel: %d %d %d\r\n", i,size[i], stride[i]); assert(size[i] == 2); assert(stride[i] == 2); } clock_t start,end; rules.resize(volume<dimension>(size)); Int nActiveInput = gpu_inputGrid.pHash->getCompactingSize(); at::Tensor d_in_points = at::empty({nActiveInput * dimension}, at::CUDA(at_kINT)); gpuErrchk(cudaMemcpy(d_in_points.data<Int>(), gpu_inputGrid.pHash->getAllPoints(), sizeof(Int) * nActiveInput * dimension, cudaMemcpyDeviceToDevice)); at::Tensor d_out_points = d_in_points / 2; at::Tensor d_results = at::empty({nActiveInput}, at::CUDA(at_kINT)); gpu_outputGrid.pHash->insert((uint32_t* )d_out_points.data<Int>(), nActiveInput); gpu_outputGrid.pHash->retrieve((uint32_t* )d_out_points.data<Int>(), (uint32_t* )d_results.data<Int>(),nActiveInput); d_results += gpu_outputGrid.ctr; d_Convolution_GenerateOutputRules((uint32_t* )d_in_points.data<Int>(),(uint32_t* )d_out_points.data<Int>(), (uint32_t* )d_results.data<Int>(), rules, nActiveInput, dimension, 2,gpu_inputGrid.ctr); gpu_outputGrid.ctr += gpu_outputGrid.pHash->size; #if 0 Int* in_points_flat = new Int[nActiveInput * dimension]; // size = nActiveInput * dimension gpuErrchk(cudaMemcpy(in_points_flat, gpu_inputGrid.pHash->getAllPoints(), sizeof(Int) * nActiveInput * dimension, cudaMemcpyDeviceToHost)); // generate output points // rewrite the rules generation part to get faster implementation Point<dimension> p; Points<dimension> out_points; Ints out_index; // note that this is a mapping from in_points to out_points for (Int i = 0; i < nActiveInput; i++) { for(Int k = 0; k < dimension; k++) { p[k] = in_points_flat[i + k * nActiveInput]; } auto outRegion = OutputRegionCalculator<dimension>( p, size, stride, outputSpatialSize); for (auto j : outRegion) { out_points.push_back(j); } } gpu_outputGrid.pHash->insert_points(out_points); gpu_outputGrid.pHash->retrieve_points(out_points, out_index); Int query_index = 0; for (Int i = 0; i < nActiveInput; i++) { for(Int k = 0; k < dimension; k++) { p[k] = in_points_flat[i + k * nActiveInput]; } auto outRegion = OutputRegionCalculator<dimension>( p, size, stride, outputSpatialSize); for (auto j : outRegion) { auto inRegion = InputRegionCalculator<dimension>(j, size, stride); Int rulesOffset = inRegion.offset(p); // printf("%d %d %d %d %d %d %d\r\n", p[0],p[1],p[2],j[0],j[1],j[2],rulesOffset); rules[rulesOffset].push_back(i + gpu_inputGrid.ctr); rules[rulesOffset].push_back(gpu_outputGrid.ctr + out_index[query_index++]); } } delete[] in_points_flat; #endif } #if 0 template <Int dimension> void Convolution_InputSgToRulesAndOutputSg(GPU_SparseGrid<dimension> &gpu_inputGrid, GPU_SparseGrid<dimension> &gpu_outputGrid, RuleBook &rules, long *size, long *stride, long *inputSpatialSize, long *outputSpatialSize) { EASY_FUNCTION(profiler::colors::Green100); EASY_BLOCK("GPU to CPU"); rules.resize(volume<dimension>(size)); Int nActiveInput = gpu_inputGrid.pHash->getCompactingSize(); Int* in_points_flat = new Int[nActiveInput * dimension]; // size = 
nActiveInput * dimension gpuErrchk(cudaMemcpy(in_points_flat, gpu_inputGrid.pHash->getAllPoints(), sizeof(Int) * nActiveInput * dimension, cudaMemcpyDeviceToHost)); Point<dimension> p; Points<dimension> out_points; EASY_END_BLOCK; // EASY_BLOCK("transformat"); // for (Int i = 0; i < nActiveInput; i++) { // for(Int k = 0; k < dimension; k++) { // p[k] = in_points_flat[i + k * nActiveInput]; // } // auto outRegion = OutputRegionCalculator<dimension>( // p, size, stride, outputSpatialSize); // for (auto j : outRegion) { // out_points.push_back(j); // } // } #ifdef NEW_SPTIAL_POINT EASY_BLOCK("new gen output"); // yet a more efficient way Int *d_prev_all_point=NULL; Int *d_next_all_point=NULL; long *d_size=NULL; long *d_stride=NULL; long *d_outputSpatialSize=NULL; d_prev_all_point=gpu_inputGrid.pHash->getAllPoints(); // small mem allocate gpuErrchk(cudaMalloc((void **)&d_size, sizeof(long) * dimension)); gpuErrchk(cudaMemcpy(d_size, size, sizeof(long) * dimension, cudaMemcpyHostToDevice)); gpuErrchk(cudaMalloc((void **)&d_stride, sizeof(long) * dimension)); gpuErrchk(cudaMemcpy(d_stride, stride, sizeof(long) * dimension, cudaMemcpyHostToDevice)); gpuErrchk(cudaMalloc((void **)&d_outputSpatialSize, sizeof(long) * dimension)); gpuErrchk(cudaMemcpy(d_outputSpatialSize, outputSpatialSize, sizeof(long) * dimension, cudaMemcpyHostToDevice)); // identify the output size long maxi_sizec=1; // Point<dimension> lb, ub,; for (Int i = 0; i < dimension; i++) { // lb[i] = std::max(0L, (input[i] - size[i] + stride[i]) / stride[i]); // ub[i] = std::min(outputSpatialSize[i] - 1, input[i] / stride[i]); maxi_sizec*= ( size[i] - stride[i]) / stride[i]+1; } gpuErrchk(cudaMalloc((void **)&d_next_all_point, sizeof(Int) * nActiveInput * maxi_sizec * dimension)); gpuErrchk(cudaMemset(d_next_all_point, 0, sizeof(Int) * nActiveInput * maxi_sizec * dimension)); dGenerateSpatialNewPoint( d_prev_all_point, // size = num_active_point * dim d_next_all_point, // size = num_active_point * maxi_sizec * dim d_size, d_stride, d_outputSpatialSize, nActiveInput, maxi_sizec, dimension); gpuErrchk( cudaDeviceSynchronize() ); EASY_END_BLOCK; /* EASY_BLOCK("GPU to CPU"); Int* tmp_outpoint=new Int[nActiveInput * maxi_sizec * dimension]; gpuErrchk(cudaMemcpy(tmp_outpoint, d_next_all_point, sizeof(Int) * nActiveInput * maxi_sizec * dimension, cudaMemcpyDeviceToHost)); gpuErrchk(cudaFree(d_next_all_point)); for (Int i = 0; i < nActiveInput; i++) { for (Int j = 0; j < maxi_sizec; j++) { for(Int k = 0; k < dimension; k++) { p[k] = tmp_outpoint[(i + k * nActiveInput)*maxi_sizec+j]; } out_points.push_back(p); } } delete tmp_outpoint; // can be more efficient here by modify porting issue: */ /* Int *d_prev_all_point=NULL; Int *d_next_all_point=NULL; long *d_size=NULL; long *d_stride=NULL; long *d_outputSpatialSize=NULL; gpuErrchk(cudaMalloc((void **)&d_prev_all_point, sizeof(Int) * nActiveInput * dimension)); gpuErrchk(cudaMemcpy(d_prev_all_point, in_points_flat, sizeof(Int) * nActiveInput * dimension, cudaMemcpyHostToDevice)); gpuErrchk(cudaMalloc((void **)&d_size, sizeof(long) * dimension)); gpuErrchk(cudaMemcpy(d_size, size, sizeof(long) * dimension, cudaMemcpyHostToDevice)); gpuErrchk(cudaMalloc((void **)&d_stride, sizeof(long) * dimension)); gpuErrchk(cudaMemcpy(d_stride, stride, sizeof(long) * dimension, cudaMemcpyHostToDevice)); gpuErrchk(cudaMalloc((void **)&d_outputSpatialSize, sizeof(long) * dimension)); gpuErrchk(cudaMemcpy(d_outputSpatialSize, outputSpatialSize, sizeof(long) * dimension, cudaMemcpyHostToDevice)); // output point mem 
size(upper bound) would be inactive* maxi_sizec long all_stride=1,maxi_sizec=1; for(Int k = 0; k < dimension; k++) { all_stride*=stride[k]; } // Point<dimension> lb, ub,; for (Int i = 0; i < dimension; i++) { // lb[i] = std::max(0L, (input[i] - size[i] + stride[i]) / stride[i]); // ub[i] = std::min(outputSpatialSize[i] - 1, input[i] / stride[i]); maxi_sizec*= ( size[i] - stride[i]) / stride[i]+1; } #ifdef PRINT_NEW_SPTIAL_POINT printf("maxi_sizec:%d",maxi_sizec); #endif gpuErrchk(cudaMalloc((void **)&d_next_all_point, sizeof(Int) * nActiveInput * maxi_sizec * dimension)); gpuErrchk(cudaMemset(d_next_all_point, 0, sizeof(Int) * nActiveInput * maxi_sizec * dimension)); EASY_END_BLOCK; EASY_BLOCK("new gen output"); dGenerateSpatialNewPoint( d_prev_all_point, // size = num_active_point * dim d_next_all_point, // size = num_active_point * maxi_sizec * dim d_size, d_stride, d_outputSpatialSize, nActiveInput, maxi_sizec, dimension); EASY_END_BLOCK; EASY_BLOCK("two ways portting"); gpuErrchk( cudaDeviceSynchronize() ); // FOR DEBUG Int* debug_outpoint=new Int[nActiveInput * maxi_sizec * dimension]; gpuErrchk(cudaMemcpy(debug_outpoint, d_next_all_point, sizeof(Int) * nActiveInput * maxi_sizec * dimension, cudaMemcpyDeviceToHost)); gpuErrchk(cudaFree(d_prev_all_point)); gpuErrchk(cudaFree(d_next_all_point)); vector<Point<dimension>> debug_outputpoint; // Point<dimension> p; for (Int i = 0; i < nActiveInput; i++) { for (Int j = 0; j < maxi_sizec; j++) { for(Int k = 0; k < dimension; k++) { p[k] = debug_outpoint[(i + k * nActiveInput)*maxi_sizec+j]; } debug_outputpoint.push_back(p); } } #ifdef PRINT_NEW_SPTIAL_POINT printf("\n%d %d\n",debug_outputpoint.size(),out_points.size()); #endif point_sort<dimension>(debug_outputpoint); point_sort<dimension>(out_points); for(Int i=0;i<min(debug_outputpoint.size(),out_points.size());i++) { if(debug_outputpoint[i]!=out_points[i]) { printf("two ways gen not same"); break; } #ifdef PRINT_NEW_SPTIAL_POINT if(i+1==min(debug_outputpoint.size(),out_points.size())) { printf("two ways gen the same"); } #endif } EASY_END_BLOCK; // cmp debug_outpoint and out_points // new transformate here // input gpumem all point() addr, outpoint addr, size, stride,outspsize,ndim, aactive EASY_END_BLOCK; EASY_END_BLOCK; */ #endif EASY_BLOCK("insert and retrieve"); // gpu_outputGrid.pHash->insert_points(out_points); // gpu_outputGrid.pHash->retrieve_points(out_points, out_index); gpu_outputGrid.pHash->insert((uint32_t*)d_next_all_point,nActiveInput * maxi_sizec); uint32_t *d_results = NULL; /* query results*/ // Allocate memory for results gpuErrchk(cudaMalloc((void**)&d_results, sizeof(uint32_t) * nActiveInput * maxi_sizec)); gpu_outputGrid.pHash->retrieve((uint32_t*)d_next_all_point,d_results,nActiveInput * maxi_sizec); Ints out_index; out_index.resize(nActiveInput * maxi_sizec); gpuErrchk(cudaMemcpy(out_index.data(), d_results, sizeof(uint32_t) * nActiveInput * maxi_sizec, cudaMemcpyDeviceToHost)); gpuErrchk(cudaFree(d_results)); gpuErrchk( cudaDeviceSynchronize() ); EASY_END_BLOCK; EASY_BLOCK("query"); // can also be more efficient, but it's more complex because of the struct rulebook vector Int query_index = 0; for (Int i = 0; i < nActiveInput; i++) { for(Int k = 0; k < dimension; k++) { p[k] = in_points_flat[i + k * nActiveInput]; } auto outRegion = OutputRegionCalculator<dimension>( p, size, stride, outputSpatialSize); for (auto j : outRegion) { auto inRegion = InputRegionCalculator<dimension>(j, size, stride); Int rulesOffset = inRegion.offset(p); rules[rulesOffset].push_back(i + 
gpu_inputGrid.ctr); rules[rulesOffset].push_back(gpu_outputGrid.ctr + out_index[query_index++]); } } gpu_outputGrid.ctr += gpu_outputGrid.pHash->size; delete[] in_points_flat; gpuErrchk(cudaFree(d_next_all_point)); EASY_END_BLOCK; } #endif #endif template <Int dimension> Int Convolution_InputSgsToRulesAndOutputSgs( #ifdef GPU_GRID GPU_SparseGrids<dimension> &input_SGs, GPU_SparseGrids<dimension> &output_SGs, #else SparseGrids<dimension> &input_SGs, SparseGrids<dimension> &output_SGs, #endif RuleBook &rules, long *filterSize, long *filterStride, long *input_spatialSize, long *output_spatialSize, std::vector<Float3> &output_normal) { EASY_FUNCTION(profiler::colors::Green100); rules.clear(); output_SGs.clear(); Int batchSize = input_SGs.size(); output_SGs.resize(batchSize); Int output_nActive = 0; Int temp; for (Int i = 0; i < batchSize; i++) { auto &iSG = input_SGs[i]; auto &oSG = output_SGs[i]; oSG.ctr = output_nActive; Convolution_InputSgToRulesAndOutputSg<dimension>( iSG, oSG, rules, filterSize, filterStride, input_spatialSize, output_spatialSize); temp = output_nActive; output_nActive = oSG.ctr; oSG.ctr = temp; } // Debug: Print rulebook #ifdef PRINT_CONVOLUTION printf("Convolution rules:\n"); for (Int i = 0; i < (Int)rules.size(); i++) { for (Int j = 0; j < (Int)rules[i].size(); j+=2) { std::cout << "Offset: " << i << ", Rules: " << rules[i][j] << ", "<< rules[i][j+1] << std::endl; } std::cout << std::endl; } printf("output_nActive = %d\n", output_nActive); #endif return output_nActive; } #define DEBUG_FAST_CONV_RULES 0 template <Int dimension> Int Convolution_InputSgsToRulesAndOutputSgs( #ifdef GPU_GRID GPU_SparseGrids<dimension> &input_SGs, GPU_SparseGrids<dimension> &output_SGs, #else SparseGrids<dimension> &input_SGs, SparseGrids<dimension> &output_SGs, #endif RuleBook &rules, long *filterSize, long *filterStride, long *input_spatialSize, long *output_spatialSize, const std::vector<Float3> &input_normal, std::vector<Float3> &output_normal, int normal_guide_scale) { output_normal.clear(); rules.clear(); output_SGs.clear(); Int batchSize = input_SGs.size(); output_SGs.resize(batchSize); // Int input_points = 0; Int output_nActive = 0; Int temp; #if DEBUG_FAST_CONV_RULES RuleBook newRules; newRules.clear(); #endif for (Int i = 0; i < batchSize; i++) { auto &iSG = input_SGs[i]; auto &oSG = output_SGs[i]; oSG.ctr = output_nActive; if(input_normal.empty() || input_spatialSize[0] < normal_guide_scale) { #if DEBUG_FAST_CONV_RULES cudaDeviceSynchronize(); clock_t start,end; double time_gt,time_fast; start = clock(); #endif Convolution_InputSgToRulesAndOutputSg_FastDownSampleMode<dimension>( iSG, oSG, rules, filterSize, filterStride, input_spatialSize, output_spatialSize); #if DEBUG_FAST_CONV_RULES end = clock(); time_gt = (double) (end-start) / CLOCKS_PER_SEC * 1000.0; start = clock(); GPU_SparseGrid<dimension> output_SG; output_SG.ctr = output_nActive; printf("input ctr: %d %d\r\n", iSG.ctr, output_SG.ctr); Convolution_InputSgToRulesAndOutputSg<dimension>( iSG, output_SG, newRules, filterSize, filterStride, input_spatialSize, output_spatialSize); cudaDeviceSynchronize(); end = clock(); time_fast = (double) (end-start) / CLOCKS_PER_SEC * 1000.0; printf("timing: %f %f\r\n", time_gt, time_fast); for(int i = 0; i < rules.size();i++) { printf("%d %d\r\n",newRules[i].size(), rules[i].size()); for(int j = 0; j < rules[i].size();j+=2) { if(newRules[i][j] != rules[i][j] || newRules[i][j+1] != rules[i][j+1]) { printf("%d %d %d %d %d %d\r\n",i,j,newRules[i][j], 
newRules[i][j+1],rules[i][j],rules[i][j+1]); exit(0); } } } printf("pass verification!\r\n"); #endif } else { // printf("normal guide scale : %d\r\n", input_spatialSize[0]); Convolution_InputSgToRulesAndOutputSg<dimension>( iSG, oSG, rules, filterSize, filterStride, input_spatialSize, output_spatialSize, input_normal, output_normal); } temp = output_nActive; output_nActive = oSG.ctr; oSG.ctr = temp; } // Debug: Print rulebook #ifdef PRINT_CONVOLUTION printf("Convolution rules with normal:\n"); for (Int i = 0; i < (Int)rules.size(); i++) { for (Int j = 0; j < (Int)rules[i].size(); j+=2) { std::cout << "Offset: " << i << ", Rules: " << rules[i][j] << ", "<< rules[i][j+1] << std::endl; } std::cout << std::endl; } printf("output_nActive = %d\n", output_nActive); #endif return output_nActive; } template <Int dimension> Int Convolution_InputSgsToRulesAndOutputSgs_OMP( SparseGrids<dimension> &input_SGs, SparseGrids<dimension> &output_SGs, RuleBook &rules, long *filterSize, long *filterStride, long *input_spatialSize, long *output_spatialSize) { rules.clear(); rules.resize(volume<dimension>(filterSize)); output_SGs.clear(); Int batchSize = input_SGs.size(); output_SGs.resize(batchSize); std::vector<RuleBook> rbs(batchSize); { Int i; #pragma omp parallel for private(i) for (i = 0; i < batchSize; i++) Convolution_InputSgToRulesAndOutputSg<dimension>( input_SGs[i], output_SGs[i], rbs[i], filterSize, filterStride, input_spatialSize, output_spatialSize); } Int output_nActive = 0; for (Int i = 0; i < batchSize; i++) { // Parallel assignment: // output_nActive <- output_nActive+output_SGs[i].ctr // output_SGs[i].ctr <- output_nActive Int tmp = output_nActive; output_nActive += output_SGs[i].ctr; output_SGs[i].ctr = tmp; } { Int i; #pragma omp parallel for private(i) for (i = 0; i < (Int)rules.size(); i++) { auto &R = rules[i]; for (Int j = 0; j < batchSize; j++) { auto &r = rbs[j][i]; auto offset = output_SGs[j].ctr; for (Int k = 0; k < (Int)r.size();) { R.push_back(r[k++]); R.push_back(r[k++] + offset); } } } } return output_nActive; } // for each active site, list of (inputFeatureNumber,batchIdx, spatialOffset) // triples template <Int dimension> void SparseToDense_InputSgsToRulesAndOutputSgs( SparseGrids<dimension> &input_SGs, RuleBook &rules, long *spatialSize) { Int batchSize = input_SGs.size(); rules.clear(); rules.resize(batchSize); Point<dimension> lb, ub; for (Int i = 0; i < dimension; ++i) { lb[i] = 0; ub[i] = spatialSize[i] - 1; } auto region = RectangularRegion<dimension>(lb, ub); for (Int batchIdx = 0; batchIdx < batchSize; batchIdx++) { auto &iSG = input_SGs[batchIdx]; for (auto const &inIter : iSG.mp) { rules[batchIdx].push_back(inIter.second + iSG.ctr); rules[batchIdx].push_back(region.offset(inIter.first)); } } } template <Int dimension> void SparseToDense_InputSgsToRulesAndOutputSgs_OMP( SparseGrids<dimension> &input_SGs, RuleBook &rules, long *spatialSize) { Int batchSize = input_SGs.size(); rules.clear(); rules.resize(batchSize); Point<dimension> lb, ub; for (Int i = 0; i < dimension; ++i) { lb[i] = 0; ub[i] = spatialSize[i] - 1; } auto region = RectangularRegion<dimension>(lb, ub); Int batchIdx; #pragma omp parallel for private(batchIdx) for (batchIdx = 0; batchIdx < batchSize; batchIdx++) { auto &iSG = input_SGs[batchIdx]; for (auto const &inIter : iSG.mp) { rules[batchIdx].push_back(inIter.second + iSG.ctr); rules[batchIdx].push_back(region.offset(inIter.first)); } } } #endif /* CONVOLUTIONRULES_H */
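Throughout this header a RuleBook is one list per filter offset, each holding flat (input row, output row) index pairs into the feature matrices, which is what every push_back pair above records. Consuming the rulebook is then a gather-multiply-scatter per offset. Below is a minimal CPU sketch of that consumption step, assuming row-major feature matrices and weights laid out as [offset][nIn][nOut]; the actual library performs this with batched GPU gather/scatter kernels, and the oriented-filter variant in this file additionally permutes the offset index before the rules are recorded.

#include <cstddef>
#include <vector>

typedef std::vector<std::vector<int> > SketchRuleBook;    // rules[k] = {in0,out0, in1,out1, ...}

void apply_rulebook_sketch(const SketchRuleBook &rules,
                           const float *inFeatures, std::size_t nIn,
                           float *outFeatures, std::size_t nOut,
                           const float *W)                // W: [rules.size()][nIn][nOut]
{
    for (std::size_t k = 0; k < rules.size(); ++k)
    {
        const float *Wk = W + k * nIn * nOut;             // weight slice for this offset
        const std::vector<int> &r = rules[k];
        for (std::size_t j = 0; j + 1 < r.size(); j += 2)
        {
            const float *in = inFeatures + (std::size_t) r[j] * nIn;
            float *out = outFeatures + (std::size_t) r[j + 1] * nOut;
            for (std::size_t o = 0; o < nOut; ++o)        // out_row += in_row * Wk
            {
                float acc = 0.0f;
                for (std::size_t i = 0; i < nIn; ++i)
                    acc += in[i] * Wk[i * nOut + o];
                out[o] += acc;
            }
        }
    }
}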
ppc64le-varargs-f128.c
// RUN: %clang_cc1 -triple powerpc64le-unknown-linux-gnu -emit-llvm \ // RUN: -target-cpu pwr9 -target-feature +float128 -mabi=ieeelongdouble \ // RUN: -o - %s | FileCheck %s -check-prefix=IEEE // RUN: %clang_cc1 -triple powerpc64le-unknown-linux-gnu -emit-llvm \ // RUN: -target-cpu pwr9 -target-feature +float128 \ // RUN: -o - %s | FileCheck %s -check-prefix=IBM // RUN: %clang_cc1 -triple ppc64le -emit-llvm-bc %s -target-cpu pwr9 \ // RUN: -target-feature +float128 -mabi=ieeelongdouble -fopenmp \ // RUN: -fopenmp-targets=ppc64le -o %t-ppc-host.bc // RUN: %clang_cc1 -triple ppc64le -aux-triple ppc64le %s -target-cpu pwr9 \ // RUN: -target-feature +float128 -fopenmp -fopenmp-is-device -emit-llvm \ // RUN: -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s \ // RUN: -check-prefix=OMP-TARGET // RUN: %clang_cc1 -triple ppc64le %t-ppc-host.bc -emit-llvm -o - | FileCheck %s \ // RUN: -check-prefix=OMP-HOST #include <stdarg.h> void foo_ld(long double); void foo_fq(__float128); // Verify cases when OpenMP target's and host's long-double semantics differ. // OMP-TARGET-LABEL: define internal void @.omp_outlined.( // OMP-TARGET: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8** // OMP-TARGET: %[[V2:[0-9a-zA-Z_.]+]] = bitcast i8* %[[CUR]] to ppc_fp128* // OMP-TARGET: %[[V3:[0-9a-zA-Z_.]+]] = load ppc_fp128, ppc_fp128* %[[V2]], align 8 // OMP-TARGET: call void @foo_ld(ppc_fp128 %[[V3]]) // OMP-HOST-LABEL: define void @omp( // OMP-HOST: %[[AP1:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP:[0-9a-zA-Z_.]+]] to i8* // OMP-HOST: call void @llvm.va_start(i8* %[[AP1]]) // OMP-HOST: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8** %[[AP]], align 8 // OMP-HOST: %[[V0:[0-9a-zA-Z_.]+]] = ptrtoint i8* %[[CUR]] to i64 // OMP-HOST: %[[V1:[0-9a-zA-Z_.]+]] = add i64 %[[V0]], 15 // OMP-HOST: %[[V2:[0-9a-zA-Z_.]+]] = and i64 %[[V1]], -16 // OMP-HOST: %[[ALIGN:[0-9a-zA-Z_.]+]] = inttoptr i64 %[[V2]] to i8* // OMP-HOST: %[[V3:[0-9a-zA-Z_.]+]] = bitcast i8* %[[ALIGN]] to fp128* // OMP-HOST: %[[V4:[0-9a-zA-Z_.]+]] = load fp128, fp128* %[[V3]], align 16 // OMP-HOST: call void @foo_ld(fp128 %[[V4]]) void omp(int n, ...) { va_list ap; va_start(ap, n); foo_ld(va_arg(ap, long double)); #pragma omp target parallel for (int i = 1; i < n; ++i) { foo_ld(va_arg(ap, long double)); } va_end(ap); } // IEEE-LABEL: define void @f128 // IEEE: %[[AP1:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP:[0-9a-zA-Z_.]+]] to i8* // IEEE: call void @llvm.va_start(i8* %[[AP1]]) // IEEE: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8** %[[AP]] // IEEE: %[[V0:[0-9a-zA-Z_.]+]] = ptrtoint i8* %[[CUR]] to i64 // IEEE: %[[V1:[0-9a-zA-Z_.]+]] = add i64 %[[V0]], 15 // IEEE: %[[V2:[0-9a-zA-Z_.]+]] = and i64 %[[V1]], -16 // IEEE: %[[ALIGN:[0-9a-zA-Z_.]+]] = inttoptr i64 %[[V2]] to i8* // IEEE: %[[V3:[0-9a-zA-Z_.]+]] = bitcast i8* %[[ALIGN]] to fp128* // IEEE: %[[V4:[0-9a-zA-Z_.]+]] = load fp128, fp128* %[[V3]], align 16 // IEEE: call void @foo_fq(fp128 %[[V4]]) // IEEE: %[[AP2:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP]] to i8* // IEEE: call void @llvm.va_end(i8* %[[AP2]]) void f128(int n, ...) 
{ va_list ap; va_start(ap, n); foo_fq(va_arg(ap, __float128)); va_end(ap); } // IEEE-LABEL: define void @long_double // IEEE: %[[AP1:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP:[0-9a-zA-Z_.]+]] to i8* // IEEE: call void @llvm.va_start(i8* %[[AP1]]) // IEEE: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8** %[[AP]] // IEEE: %[[V0:[0-9a-zA-Z_.]+]] = ptrtoint i8* %[[CUR]] to i64 // IEEE: %[[V1:[0-9a-zA-Z_.]+]] = add i64 %[[V0]], 15 // IEEE: %[[V2:[0-9a-zA-Z_.]+]] = and i64 %[[V1]], -16 // IEEE: %[[ALIGN:[0-9a-zA-Z_.]+]] = inttoptr i64 %[[V2]] to i8* // IEEE: %[[V3:[0-9a-zA-Z_.]+]] = bitcast i8* %[[ALIGN]] to fp128* // IEEE: %[[V4:[0-9a-zA-Z_.]+]] = load fp128, fp128* %[[V3]], align 16 // IEEE: call void @foo_ld(fp128 %[[V4]]) // IEEE: %[[AP2:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP]] to i8* // IEEE: call void @llvm.va_end(i8* %[[AP2]]) // IBM-LABEL: define void @long_double // IBM: %[[AP1:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP:[0-9a-zA-Z_.]+]] to i8* // IBM: call void @llvm.va_start(i8* %[[AP1]]) // IBM: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8** %[[AP]] // IBM: %[[V3:[0-9a-zA-Z_.]+]] = bitcast i8* %[[CUR]] to ppc_fp128* // IBM: %[[V4:[0-9a-zA-Z_.]+]] = load ppc_fp128, ppc_fp128* %[[V3]], align 8 // IBM: call void @foo_ld(ppc_fp128 %[[V4]]) // IBM: %[[AP2:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP]] to i8* // IBM: call void @llvm.va_end(i8* %[[AP2]]) void long_double(int n, ...) { va_list ap; va_start(ap, n); foo_ld(va_arg(ap, long double)); va_end(ap); }
resample.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % RRRR EEEEE SSSSS AAA M M PPPP L EEEEE % % R R E SS A A MM MM P P L E % % RRRR EEE SSS AAAAA M M M PPPP L EEE % % R R E SS A A M M P L E % % R R EEEEE SSSSS A A M M P LLLLL EEEEE % % % % % % MagickCore Pixel Resampling Methods % % % % Software Design % % Cristy % % Anthony Thyssen % % August 2007 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/color-private.h" #include "magick/cache.h" #include "magick/draw.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/pixel.h" #include "magick/pixel-private.h" #include "magick/quantum.h" #include "magick/random_.h" #include "magick/resample.h" #include "magick/resize.h" #include "magick/resize-private.h" #include "magick/resource_.h" #include "magick/transform.h" #include "magick/signature-private.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/option.h" /* EWA Resampling Options */ /* select ONE resampling method */ #define EWA 1 /* Normal EWA handling - raw or clamped */ /* if 0 then use "High Quality EWA" */ #define EWA_CLAMP 1 /* EWA Clamping from Nicolas Robidoux */ #define FILTER_LUT 1 /* Use a LUT rather then direct filter calls */ /* output debugging information */ #define DEBUG_ELLIPSE 0 /* output ellipse info for debug */ #define DEBUG_HIT_MISS 0 /* output hit/miss pixels (as gnuplot commands) */ #define DEBUG_NO_PIXEL_HIT 0 /* Make pixels that fail to hit anything - RED */ #if ! FILTER_DIRECT #define WLUT_WIDTH 1024 /* size of the filter cache */ #endif /* Typedef declarations. 
*/ struct _ResampleFilter { CacheView *view; Image *image; ExceptionInfo *exception; MagickBooleanType debug; /* Information about image being resampled */ ssize_t image_area; InterpolatePixelMethod interpolate; VirtualPixelMethod virtual_pixel; FilterTypes filter; /* processing settings needed */ MagickBooleanType limit_reached, do_interpolate, average_defined; MagickPixelPacket average_pixel; /* current ellipitical area being resampled around center point */ double A, B, C, Vlimit, Ulimit, Uwidth, slope; #if FILTER_LUT /* LUT of weights for filtered average in elliptical area */ double filter_lut[WLUT_WIDTH]; #else /* Use a Direct call to the filter functions */ ResizeFilter *filter_def; double F; #endif /* the practical working support of the filter */ double support; size_t signature; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e R e s a m p l e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireResampleFilter() initializes the information resample needs do to a % scaled lookup of a color from an image, using area sampling. % % The algorithm is based on a Elliptical Weighted Average, where the pixels % found in a large elliptical area is averaged together according to a % weighting (filter) function. For more details see "Fundamentals of Texture % Mapping and Image Warping" a master's thesis by Paul.S.Heckbert, June 17, % 1989. Available for free from, http://www.cs.cmu.edu/~ph/ % % As EWA resampling (or any sort of resampling) can require a lot of % calculations to produce a distorted scaling of the source image for each % output pixel, the ResampleFilter structure generated holds that information % between individual image resampling. % % This function will make the appropriate AcquireVirtualCacheView() calls % to view the image, calling functions do not need to open a cache view. % % Usage Example... % resample_filter=AcquireResampleFilter(image,exception); % SetResampleFilter(resample_filter, GaussianFilter, 1.0); % for (y=0; y < (ssize_t) image->rows; y++) { % for (x=0; x < (ssize_t) image->columns; x++) { % u= ....; v= ....; % ScaleResampleFilter(resample_filter, ... scaling vectors ...); % (void) ResamplePixelColor(resample_filter,u,v,&pixel); % ... assign resampled pixel value ... % } % } % DestroyResampleFilter(resample_filter); % % The format of the AcquireResampleFilter method is: % % ResampleFilter *AcquireResampleFilter(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ResampleFilter *AcquireResampleFilter(const Image *image, ExceptionInfo *exception) { register ResampleFilter *resample_filter; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); resample_filter=(ResampleFilter *) AcquireMagickMemory( sizeof(*resample_filter)); if (resample_filter == (ResampleFilter *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(resample_filter,0,sizeof(*resample_filter)); resample_filter->exception=exception; resample_filter->image=ReferenceImage((Image *) image); resample_filter->view=AcquireVirtualCacheView(resample_filter->image,exception); resample_filter->debug=IsEventLogging(); resample_filter->signature=MagickCoreSignature; resample_filter->image_area=(ssize_t) (image->columns*image->rows); resample_filter->average_defined = MagickFalse; /* initialise the resampling filter settings */ SetResampleFilter(resample_filter, image->filter, image->blur); (void) SetResampleFilterInterpolateMethod(resample_filter, image->interpolate); (void) SetResampleFilterVirtualPixelMethod(resample_filter, GetImageVirtualPixelMethod(image)); return(resample_filter); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y R e s a m p l e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyResampleFilter() finalizes and cleans up the resampling % resample_filter as returned by AcquireResampleFilter(), freeing any memory % or other information as needed. % % The format of the DestroyResampleFilter method is: % % ResampleFilter *DestroyResampleFilter(ResampleFilter *resample_filter) % % A description of each parameter follows: % % o resample_filter: resampling information structure % */ MagickExport ResampleFilter *DestroyResampleFilter( ResampleFilter *resample_filter) { assert(resample_filter != (ResampleFilter *) NULL); assert(resample_filter->signature == MagickCoreSignature); assert(resample_filter->image != (Image *) NULL); if (resample_filter->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", resample_filter->image->filename); resample_filter->view=DestroyCacheView(resample_filter->view); resample_filter->image=DestroyImage(resample_filter->image); #if ! FILTER_LUT resample_filter->filter_def=DestroyResizeFilter(resample_filter->filter_def); #endif resample_filter->signature=(~MagickCoreSignature); resample_filter=(ResampleFilter *) RelinquishMagickMemory(resample_filter); return(resample_filter); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s a m p l e P i x e l C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResamplePixelColor() samples the pixel values surrounding the location % given using an elliptical weighted average, at the scale previously % calculated, and in the most efficent manner possible for the % VirtualPixelMethod setting. % % The format of the ResamplePixelColor method is: % % MagickBooleanType ResamplePixelColor(ResampleFilter *resample_filter, % const double u0,const double v0,MagickPixelPacket *pixel) % % A description of each parameter follows: % % o resample_filter: the resample filter. 
% % o u0,v0: A double representing the center of the area to resample, % The distortion transformed transformed x,y coordinate. % % o pixel: the resampled pixel is returned here. % */ MagickExport MagickBooleanType ResamplePixelColor( ResampleFilter *resample_filter,const double u0,const double v0, MagickPixelPacket *pixel) { MagickBooleanType status; ssize_t u,v, v1, v2, uw, hit; double u1; double U,V,Q,DQ,DDQ; double divisor_c,divisor_m; register double weight; register const PixelPacket *pixels; register const IndexPacket *indexes; assert(resample_filter != (ResampleFilter *) NULL); assert(resample_filter->signature == MagickCoreSignature); status=MagickTrue; /* GetMagickPixelPacket(resample_filter->image,pixel); */ if ( resample_filter->do_interpolate ) { status=InterpolateMagickPixelPacket(resample_filter->image, resample_filter->view,resample_filter->interpolate,u0,v0,pixel, resample_filter->exception); return(status); } #if DEBUG_ELLIPSE (void) FormatLocaleFile(stderr, "u0=%lf; v0=%lf;\n", u0, v0); #endif /* Does resample area Miss the image Proper? If and that area a simple solid color - then simply return that color! This saves a lot of calculation when resampling outside the bounds of the source image. However it probably should be expanded to image bounds plus the filters scaled support size. */ hit = 0; switch ( resample_filter->virtual_pixel ) { case BackgroundVirtualPixelMethod: case ConstantVirtualPixelMethod: case TransparentVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case WhiteVirtualPixelMethod: case MaskVirtualPixelMethod: if ( resample_filter->limit_reached || u0 + resample_filter->Ulimit < 0.0 || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0 || v0 + resample_filter->Vlimit < 0.0 || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 ) hit++; break; case UndefinedVirtualPixelMethod: case EdgeVirtualPixelMethod: if ( ( u0 + resample_filter->Ulimit < 0.0 && v0 + resample_filter->Vlimit < 0.0 ) || ( u0 + resample_filter->Ulimit < 0.0 && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 ) || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0 && v0 + resample_filter->Vlimit < 0.0 ) || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0 && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 ) ) hit++; break; case HorizontalTileVirtualPixelMethod: if ( v0 + resample_filter->Vlimit < 0.0 || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 ) hit++; /* outside the horizontally tiled images. */ break; case VerticalTileVirtualPixelMethod: if ( u0 + resample_filter->Ulimit < 0.0 || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0 ) hit++; /* outside the vertically tiled images. 
*/ break; case DitherVirtualPixelMethod: if ( ( u0 + resample_filter->Ulimit < -32.0 && v0 + resample_filter->Vlimit < -32.0 ) || ( u0 + resample_filter->Ulimit < -32.0 && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 ) || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0 && v0 + resample_filter->Vlimit < -32.0 ) || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0 && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 ) ) hit++; break; case TileVirtualPixelMethod: case MirrorVirtualPixelMethod: case RandomVirtualPixelMethod: case HorizontalTileEdgeVirtualPixelMethod: case VerticalTileEdgeVirtualPixelMethod: case CheckerTileVirtualPixelMethod: /* resampling of area is always needed - no VP limits */ break; } if ( hit ) { /* The area being resampled is simply a solid color * just return a single lookup color. * * Should this return the users requested interpolated color? */ status=InterpolateMagickPixelPacket(resample_filter->image, resample_filter->view,IntegerInterpolatePixel,u0,v0,pixel, resample_filter->exception); return(status); } /* When Scaling limits reached, return an 'averaged' result. */ if ( resample_filter->limit_reached ) { switch ( resample_filter->virtual_pixel ) { /* This is always handled by the above, so no need. case BackgroundVirtualPixelMethod: case ConstantVirtualPixelMethod: case TransparentVirtualPixelMethod: case GrayVirtualPixelMethod, case WhiteVirtualPixelMethod case MaskVirtualPixelMethod: */ case UndefinedVirtualPixelMethod: case EdgeVirtualPixelMethod: case DitherVirtualPixelMethod: case HorizontalTileEdgeVirtualPixelMethod: case VerticalTileEdgeVirtualPixelMethod: /* We need an average edge pixel, from the correct edge! How should I calculate an average edge color? Just returning an averaged neighbourhood, works well in general, but falls down for TileEdge methods. This needs to be done properly!!!!!! */ status=InterpolateMagickPixelPacket(resample_filter->image, resample_filter->view,AverageInterpolatePixel,u0,v0,pixel, resample_filter->exception); break; case HorizontalTileVirtualPixelMethod: case VerticalTileVirtualPixelMethod: /* just return the background pixel - Is there a better way? 
*/ status=InterpolateMagickPixelPacket(resample_filter->image, resample_filter->view,IntegerInterpolatePixel,-1.0,-1.0,pixel, resample_filter->exception); break; case TileVirtualPixelMethod: case MirrorVirtualPixelMethod: case RandomVirtualPixelMethod: case CheckerTileVirtualPixelMethod: default: /* generate a average color of the WHOLE image */ if ( resample_filter->average_defined == MagickFalse ) { Image *average_image; CacheView *average_view; GetMagickPixelPacket(resample_filter->image,(MagickPixelPacket *) &resample_filter->average_pixel); resample_filter->average_defined=MagickTrue; /* Try to get an averaged pixel color of whole image */ average_image=ResizeImage(resample_filter->image,1,1,BoxFilter,1.0, resample_filter->exception); if (average_image == (Image *) NULL) { *pixel=resample_filter->average_pixel; /* FAILED */ break; } average_view=AcquireVirtualCacheView(average_image, &average_image->exception); pixels=(PixelPacket *)GetCacheViewVirtualPixels(average_view,0,0,1,1, resample_filter->exception); if (pixels == (const PixelPacket *) NULL) { average_view=DestroyCacheView(average_view); average_image=DestroyImage(average_image); *pixel=resample_filter->average_pixel; /* FAILED */ break; } indexes=(IndexPacket *) GetCacheViewAuthenticIndexQueue(average_view); SetMagickPixelPacket(resample_filter->image,pixels,indexes, &(resample_filter->average_pixel)); average_view=DestroyCacheView(average_view); average_image=DestroyImage(average_image); if ( resample_filter->virtual_pixel == CheckerTileVirtualPixelMethod ) { /* CheckerTile is a alpha blend of the image's average pixel color and the current background color */ /* image's average pixel color */ weight = QuantumScale*((MagickRealType)(QuantumRange- resample_filter->average_pixel.opacity)); resample_filter->average_pixel.red *= weight; resample_filter->average_pixel.green *= weight; resample_filter->average_pixel.blue *= weight; divisor_c = weight; /* background color */ weight = QuantumScale*((MagickRealType)(QuantumRange- resample_filter->image->background_color.opacity)); resample_filter->average_pixel.red += weight*resample_filter->image->background_color.red; resample_filter->average_pixel.green += weight*resample_filter->image->background_color.green; resample_filter->average_pixel.blue += weight*resample_filter->image->background_color.blue; resample_filter->average_pixel.opacity += resample_filter->image->background_color.opacity; divisor_c += weight; /* alpha blend */ resample_filter->average_pixel.red /= divisor_c; resample_filter->average_pixel.green /= divisor_c; resample_filter->average_pixel.blue /= divisor_c; resample_filter->average_pixel.opacity /= 2; /* 50% blend */ } } *pixel=resample_filter->average_pixel; break; } return(status); } /* Initialize weighted average data collection */ hit = 0; divisor_c = 0.0; divisor_m = 0.0; pixel->red = pixel->green = pixel->blue = 0.0; if (pixel->matte != MagickFalse) pixel->opacity = 0.0; if (pixel->colorspace == CMYKColorspace) pixel->index = 0.0; /* Determine the parellelogram bounding box fitted to the ellipse centered at u0,v0. This area is bounding by the lines... 
*/ v1 = (ssize_t)ceil(v0 - resample_filter->Vlimit); /* range of scan lines */ v2 = (ssize_t)floor(v0 + resample_filter->Vlimit); /* scan line start and width accross the parallelogram */ u1 = u0 + (v1-v0)*resample_filter->slope - resample_filter->Uwidth; uw = (ssize_t)(2.0*resample_filter->Uwidth)+1; #if DEBUG_ELLIPSE (void) FormatLocaleFile(stderr, "v1=%ld; v2=%ld\n", (long)v1, (long)v2); (void) FormatLocaleFile(stderr, "u1=%ld; uw=%ld\n", (long)u1, (long)uw); #else # define DEBUG_HIT_MISS 0 /* only valid if DEBUG_ELLIPSE is enabled */ #endif /* Do weighted resampling of all pixels, within the scaled ellipse, bound by a Parellelogram fitted to the ellipse. */ DDQ = 2*resample_filter->A; for( v=v1; v<=v2; v++ ) { #if DEBUG_HIT_MISS long uu = ceil(u1); /* actual pixel location (for debug only) */ (void) FormatLocaleFile(stderr, "# scan line from pixel %ld, %ld\n", (long)uu, (long)v); #endif u = (ssize_t)ceil(u1); /* first pixel in scanline */ u1 += resample_filter->slope; /* start of next scan line */ /* location of this first pixel, relative to u0,v0 */ U = (double)u-u0; V = (double)v-v0; /* Q = ellipse quotent ( if Q<F then pixel is inside ellipse) */ Q = (resample_filter->A*U + resample_filter->B*V)*U + resample_filter->C*V*V; DQ = resample_filter->A*(2.0*U+1) + resample_filter->B*V; /* get the scanline of pixels for this v */ pixels=GetCacheViewVirtualPixels(resample_filter->view,u,v,(size_t) uw, 1,resample_filter->exception); if (pixels == (const PixelPacket *) NULL) return(MagickFalse); indexes=GetCacheViewVirtualIndexQueue(resample_filter->view); /* count up the weighted pixel colors */ for( u=0; u<uw; u++ ) { weight = 0; #if FILTER_LUT /* Note that the ellipse has been pre-scaled so F = WLUT_WIDTH */ if ( Q < (double)WLUT_WIDTH ) { weight = resample_filter->filter_lut[(int)Q]; #else /* Note that the ellipse has been pre-scaled so F = support^2 */ if ( Q < (double)resample_filter->F ) { weight = GetResizeFilterWeight(resample_filter->filter_def, sqrt(Q)); /* a SquareRoot! Arrggghhhhh... 
*/ #endif if (pixel->matte != MagickFalse) pixel->opacity += weight*pixels->opacity; divisor_m += weight; if (pixel->matte != MagickFalse) weight *= QuantumScale*((MagickRealType)(QuantumRange-pixels->opacity)); pixel->red += weight*pixels->red; pixel->green += weight*pixels->green; pixel->blue += weight*pixels->blue; if (pixel->colorspace == CMYKColorspace) pixel->index += weight*(*indexes); divisor_c += weight; hit++; #if DEBUG_HIT_MISS /* mark the pixel according to hit/miss of the ellipse */ (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n", (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1); (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n", (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1); } else { (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n", (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1); (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n", (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1); } uu++; #else } #endif pixels++; indexes++; Q += DQ; DQ += DDQ; } } #if DEBUG_ELLIPSE (void) FormatLocaleFile(stderr, "Hit=%ld; Total=%ld;\n", (long)hit, (long)uw*(v2-v1) ); #endif /* Result sanity check -- this should NOT happen */ if ( hit == 0 || divisor_m <= MagickEpsilon || divisor_c <= MagickEpsilon ) { /* not enough pixels, or bad weighting in resampling, resort to direct interpolation */ #if DEBUG_NO_PIXEL_HIT pixel->opacity = pixel->red = pixel->green = pixel->blue = 0; pixel->red = QuantumRange; /* show pixels for which EWA fails */ #else status=InterpolateMagickPixelPacket(resample_filter->image, resample_filter->view,resample_filter->interpolate,u0,v0,pixel, resample_filter->exception); #endif return status; } /* Finialize results of resampling */ divisor_m = 1.0/divisor_m; if (pixel->matte != MagickFalse) pixel->opacity = (MagickRealType) ClampToQuantum(divisor_m*pixel->opacity); divisor_c = 1.0/divisor_c; pixel->red = (MagickRealType) ClampToQuantum(divisor_c*pixel->red); pixel->green = (MagickRealType) ClampToQuantum(divisor_c*pixel->green); pixel->blue = (MagickRealType) ClampToQuantum(divisor_c*pixel->blue); if (pixel->colorspace == CMYKColorspace) pixel->index = (MagickRealType) ClampToQuantum(divisor_c*pixel->index); return(MagickTrue); } #if EWA && EWA_CLAMP /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % - C l a m p U p A x e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClampUpAxes() function converts the input vectors into a major and % minor axis unit vectors, and their magnitude. This allows us to % ensure that the ellipse generated is never smaller than the unit % circle and thus never too small for use in EWA resampling. % % This purely mathematical 'magic' was provided by Professor Nicolas % Robidoux and his Masters student Chantal Racette. % % Reference: "We Recommend Singular Value Decomposition", David Austin % http://www.ams.org/samplings/feature-column/fcarc-svd % % By generating major and minor axis vectors, we can actually use the % ellipse in its "canonical form", by remapping the dx,dy of the % sampled point into distances along the major and minor axis unit % vectors. 
% % Reference: http://en.wikipedia.org/wiki/Ellipse#Canonical_form */ static inline void ClampUpAxes(const double dux, const double dvx, const double duy, const double dvy, double *major_mag, double *minor_mag, double *major_unit_x, double *major_unit_y, double *minor_unit_x, double *minor_unit_y) { /* * ClampUpAxes takes an input 2x2 matrix * * [ a b ] = [ dux duy ] * [ c d ] = [ dvx dvy ] * * and computes from it the major and minor axis vectors [major_x, * major_y] and [minor_x,minor_y] of the smallest ellipse containing * both the unit disk and the ellipse which is the image of the unit * disk by the linear transformation * * [ dux duy ] [S] = [s] * [ dvx dvy ] [T] = [t] * * (The vector [S,T] is the difference between a position in output * space and [X,Y]; the vector [s,t] is the difference between a * position in input space and [x,y].) */ /* * Output: * * major_mag is the half-length of the major axis of the "new" * ellipse. * * minor_mag is the half-length of the minor axis of the "new" * ellipse. * * major_unit_x is the x-coordinate of the major axis direction vector * of both the "old" and "new" ellipses. * * major_unit_y is the y-coordinate of the major axis direction vector. * * minor_unit_x is the x-coordinate of the minor axis direction vector. * * minor_unit_y is the y-coordinate of the minor axis direction vector. * * Unit vectors are useful for computing projections, in particular, * to compute the distance between a point in output space and the * center of a unit disk in output space, using the position of the * corresponding point [s,t] in input space. Following the clamping, * the square of this distance is * * ( ( s * major_unit_x + t * major_unit_y ) / major_mag )^2 * + * ( ( s * minor_unit_x + t * minor_unit_y ) / minor_mag )^2 * * If such distances will be computed for many [s,t]'s, it makes * sense to actually compute the reciprocal of major_mag and * minor_mag and multiply them by the above unit lengths. * * Now, if you want to modify the input pair of tangent vectors so * that it defines the modified ellipse, all you have to do is set * * newdux = major_mag * major_unit_x * newdvx = major_mag * major_unit_y * newduy = minor_mag * minor_unit_x = minor_mag * -major_unit_y * newdvy = minor_mag * minor_unit_y = minor_mag * major_unit_x * * and use these tangent vectors as if they were the original ones. * Usually, this is a drastic change in the tangent vectors even if * the singular values are not clamped; for example, the minor axis * vector always points in a direction which is 90 degrees * counterclockwise from the direction of the major axis vector. */ /* * Discussion: * * GOAL: Fix things so that the pullback, in input space, of a disk * of radius r in output space is an ellipse which contains, at * least, a disc of radius r. (Make this hold for any r>0.) * * ESSENCE OF THE METHOD: Compute the product of the first two * factors of an SVD of the linear transformation defining the * ellipse and make sure that both its columns have norm at least 1. * Because rotations and reflexions map disks to themselves, it is * not necessary to compute the third (rightmost) factor of the SVD. * * DETAILS: Find the singular values and (unit) left singular * vectors of Jinv, clampling up the singular values to 1, and * multiply the unit left singular vectors by the new singular * values in order to get the minor and major ellipse axis vectors. 
* * Image resampling context: * * The Jacobian matrix of the transformation at the output point * under consideration is defined as follows: * * Consider the transformation (x,y) -> (X,Y) from input locations * to output locations. (Anthony Thyssen, elsewhere in resample.c, * uses the notation (u,v) -> (x,y).) * * The Jacobian matrix of the transformation at (x,y) is equal to * * J = [ A, B ] = [ dX/dx, dX/dy ] * [ C, D ] [ dY/dx, dY/dy ] * * that is, the vector [A,C] is the tangent vector corresponding to * input changes in the horizontal direction, and the vector [B,D] * is the tangent vector corresponding to input changes in the * vertical direction. * * In the context of resampling, it is natural to use the inverse * Jacobian matrix Jinv because resampling is generally performed by * pulling pixel locations in the output image back to locations in * the input image. Jinv is * * Jinv = [ a, b ] = [ dx/dX, dx/dY ] * [ c, d ] [ dy/dX, dy/dY ] * * Note: Jinv can be computed from J with the following matrix * formula: * * Jinv = 1/(A*D-B*C) [ D, -B ] * [ -C, A ] * * What we do is modify Jinv so that it generates an ellipse which * is as close as possible to the original but which contains the * unit disk. This can be accomplished as follows: * * Let * * Jinv = U Sigma V^T * * be an SVD decomposition of Jinv. (The SVD is not unique, but the * final ellipse does not depend on the particular SVD.) * * We could clamp up the entries of the diagonal matrix Sigma so * that they are at least 1, and then set * * Jinv = U newSigma V^T. * * However, we do not need to compute V for the following reason: * V^T is an orthogonal matrix (that is, it represents a combination * of rotations and reflexions) so that it maps the unit circle to * itself. For this reason, the exact value of V does not affect the * final ellipse, and we can choose V to be the identity * matrix. This gives * * Jinv = U newSigma. * * In the end, we return the two diagonal entries of newSigma * together with the two columns of U. */ /* * ClampUpAxes was written by Nicolas Robidoux and Chantal Racette * of Laurentian University with insightful suggestions from Anthony * Thyssen and funding from the National Science and Engineering * Research Council of Canada. It is distinguished from its * predecessors by its efficient handling of degenerate cases. * * The idea of clamping up the EWA ellipse's major and minor axes so * that the result contains the reconstruction kernel filter support * is taken from Andreas Gustaffson's Masters thesis "Interactive * Image Warping", Helsinki University of Technology, Faculty of * Information Technology, 59 pages, 1993 (see Section 3.6). * * The use of the SVD to clamp up the singular values of the * Jacobian matrix of the pullback transformation for EWA resampling * is taken from the astrophysicist Craig DeForest. It is * implemented in his PDL::Transform code (PDL = Perl Data * Language). */ const double a = dux; const double b = duy; const double c = dvx; const double d = dvy; /* * n is the matrix Jinv * transpose(Jinv). Eigenvalues of n are the * squares of the singular values of Jinv. */ const double aa = a*a; const double bb = b*b; const double cc = c*c; const double dd = d*d; /* * Eigenvectors of n are left singular vectors of Jinv. 
*/ const double n11 = aa+bb; const double n12 = a*c+b*d; const double n21 = n12; const double n22 = cc+dd; const double det = a*d-b*c; const double twice_det = det+det; const double frobenius_squared = n11+n22; const double discriminant = (frobenius_squared+twice_det)*(frobenius_squared-twice_det); /* * In exact arithmetic, discriminant can't be negative. In floating * point, it can, because of the bad conditioning of SVD * decompositions done through the associated normal matrix. */ const double sqrt_discriminant = sqrt(discriminant > 0.0 ? discriminant : 0.0); /* * s1 is the largest singular value of the inverse Jacobian * matrix. In other words, its reciprocal is the smallest singular * value of the Jacobian matrix itself. * If s1 = 0, both singular values are 0, and any orthogonal pair of * left and right factors produces a singular decomposition of Jinv. */ /* * Initially, we only compute the squares of the singular values. */ const double s1s1 = 0.5*(frobenius_squared+sqrt_discriminant); /* * s2 the smallest singular value of the inverse Jacobian * matrix. Its reciprocal is the largest singular value of the * Jacobian matrix itself. */ const double s2s2 = 0.5*(frobenius_squared-sqrt_discriminant); const double s1s1minusn11 = s1s1-n11; const double s1s1minusn22 = s1s1-n22; /* * u1, the first column of the U factor of a singular decomposition * of Jinv, is a (non-normalized) left singular vector corresponding * to s1. It has entries u11 and u21. We compute u1 from the fact * that it is an eigenvector of n corresponding to the eigenvalue * s1^2. */ const double s1s1minusn11_squared = s1s1minusn11*s1s1minusn11; const double s1s1minusn22_squared = s1s1minusn22*s1s1minusn22; /* * The following selects the largest row of n-s1^2 I as the one * which is used to find the eigenvector. If both s1^2-n11 and * s1^2-n22 are zero, n-s1^2 I is the zero matrix. In that case, * any vector is an eigenvector; in addition, norm below is equal to * zero, and, in exact arithmetic, this is the only case in which * norm = 0. So, setting u1 to the simple but arbitrary vector [1,0] * if norm = 0 safely takes care of all cases. */ const double temp_u11 = ( (s1s1minusn11_squared>=s1s1minusn22_squared) ? n12 : s1s1minusn22 ); const double temp_u21 = ( (s1s1minusn11_squared>=s1s1minusn22_squared) ? s1s1minusn11 : n21 ); const double norm = sqrt(temp_u11*temp_u11+temp_u21*temp_u21); /* * Finalize the entries of first left singular vector (associated * with the largest singular value). */ const double u11 = ( (norm>0.0) ? temp_u11/norm : 1.0 ); const double u21 = ( (norm>0.0) ? temp_u21/norm : 0.0 ); /* * Clamp the singular values up to 1. */ *major_mag = ( (s1s1<=1.0) ? 1.0 : sqrt(s1s1) ); *minor_mag = ( (s2s2<=1.0) ? 1.0 : sqrt(s2s2) ); /* * Return the unit major and minor axis direction vectors. */ *major_unit_x = u11; *major_unit_y = u21; *minor_unit_x = -u21; *minor_unit_y = u11; } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S c a l e R e s a m p l e F i l t e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleResampleFilter() does all the calculations needed to resample an image % at a specific scale, defined by two scaling vectors. This not using % a orthogonal scaling, but two distorted scaling vectors, to allow the % generation of a angled ellipse. % % As only two deritive scaling vectors are used the center of the ellipse % must be the center of the lookup. 
That is, any curvature that the % distortion may produce is discounted. % % The input vectors are produced by either finding the derivatives of the % distortion function, or the partial derivatives from a distortion mapping. % They do not need to be the orthogonal dx,dy scaling vectors, but can be % calculated from other derivatives. For example you could use dr,da/r % polar coordinate scaling vectors. % % If u,v = DistortEquation(x,y) OR u = Fu(x,y); v = Fv(x,y) % Then the scaling vectors are determined from the derivatives... % du/dx, dv/dx and du/dy, dv/dy % If the resulting scaling vectors are orthogonally aligned then... % dv/dx = 0 and du/dy = 0 % Producing an orthogonally aligned ellipse in source space for the area to % be resampled. % % Note that the scaling vector order differs from the argument order. Argument order % is the general order the derivatives are extracted from the distortion % equations, and not the scaling vectors. As such the middle two values % may be swapped from what you expect. Caution is advised. % % WARNING: It is assumed that any SetResampleFilter() method call will % always be performed before the ScaleResampleFilter() method, so that the % size of the ellipse will match the support for the resampling filter being % used. % % The format of the ScaleResampleFilter method is: % % void ScaleResampleFilter(const ResampleFilter *resample_filter, % const double dux,const double duy,const double dvx,const double dvy) % % A description of each parameter follows: % % o resample_filter: the resampling information defining the % image being resampled % % o dux,duy,dvx,dvy: % The derivatives or scaling vectors defining the EWA ellipse. % NOTE: watch the order, which is based on the order derivatives % are usually determined from distortion equations (see above). % The middle two values may need to be swapped if you are thinking % in terms of scaling vectors. % */ MagickExport void ScaleResampleFilter(ResampleFilter *resample_filter, const double dux,const double duy,const double dvx,const double dvy) { double A,B,C,F; assert(resample_filter != (ResampleFilter *) NULL); assert(resample_filter->signature == MagickCoreSignature); resample_filter->limit_reached = MagickFalse; /* A 'point' filter forces use of interpolation instead of area sampling */ if ( resample_filter->filter == PointFilter ) return; /* EWA turned off - nothing to do */ #if DEBUG_ELLIPSE (void) FormatLocaleFile(stderr, "# -----\n" ); (void) FormatLocaleFile(stderr, "dux=%lf; dvx=%lf; duy=%lf; dvy=%lf;\n", dux, dvx, duy, dvy); #endif /* Find Ellipse Coefficients such that A*u^2 + B*u*v + C*v^2 = F With u,v relative to the point around which we are resampling. And the given scaling dx,dy vectors in u,v space du/dx,dv/dx and du/dy,dv/dy */ #if EWA /* Direct conversion of derivatives into elliptical coefficients However when magnifying images, the scaling vectors will be small resulting in an ellipse that is too small to sample properly. As such we need to clamp the major/minor axis to a minimum of 1.0 to prevent it getting too small.
*/ #if EWA_CLAMP { double major_mag, minor_mag, major_x, major_y, minor_x, minor_y; ClampUpAxes(dux,dvx,duy,dvy, &major_mag, &minor_mag, &major_x, &major_y, &minor_x, &minor_y); major_x *= major_mag; major_y *= major_mag; minor_x *= minor_mag; minor_y *= minor_mag; #if DEBUG_ELLIPSE (void) FormatLocaleFile(stderr, "major_x=%lf; major_y=%lf; minor_x=%lf; minor_y=%lf;\n", major_x, major_y, minor_x, minor_y); #endif A = major_y*major_y+minor_y*minor_y; B = -2.0*(major_x*major_y+minor_x*minor_y); C = major_x*major_x+minor_x*minor_x; F = major_mag*minor_mag; F *= F; /* square it */ } #else /* raw unclamped EWA */ A = dvx*dvx+dvy*dvy; B = -2.0*(dux*dvx+duy*dvy); C = dux*dux+duy*duy; F = dux*dvy-duy*dvx; F *= F; /* square it */ #endif /* EWA_CLAMP */ #else /* HQ_EWA */ /* This is Paul Heckbert's "Higher Quality EWA" formula, from page 60 in his thesis, which adds a unit circle to the elliptical area so as to do both Reconstruction and Prefiltering of the pixels in the resampling. It also means it is always likely to have at least 4 pixels within the area of the ellipse, for weighted averaging. No scaling will result with F == 4.0 and a circle of radius 2.0, and F smaller than this means magnification is being used. NOTE: This method produces a very blurry result at near unity scale while producing perfect results for strong minification and magnification. However filter support is fixed to 2.0 (no good for Windowed Sinc filters) */ A = dvx*dvx+dvy*dvy+1; B = -2.0*(dux*dvx+duy*dvy); C = dux*dux+duy*duy+1; F = A*C - B*B/4; #endif #if DEBUG_ELLIPSE (void) FormatLocaleFile(stderr, "A=%lf; B=%lf; C=%lf; F=%lf\n", A,B,C,F); /* Figure out various information directly about the ellipse. This information is not currently needed, but may be needed later for better limit determination. It is also good to have as a record for future debugging */ { double alpha, beta, gamma, Major, Minor; double Eccentricity, Ellipse_Area, Ellipse_Angle; alpha = A+C; beta = A-C; gamma = sqrt(beta*beta + B*B ); if ( alpha - gamma <= MagickEpsilon ) Major= MagickMaximumValue; else Major= sqrt(2*F/(alpha - gamma)); Minor = sqrt(2*F/(alpha + gamma)); (void) FormatLocaleFile(stderr, "# Major=%lf; Minor=%lf\n", Major, Minor ); /* other information about the ellipse includes... */ Eccentricity = Major/Minor; Ellipse_Area = MagickPI*Major*Minor; Ellipse_Angle = atan2(B, A-C); (void) FormatLocaleFile(stderr, "# Angle=%lf Area=%lf\n", (double) RadiansToDegrees(Ellipse_Angle), Ellipse_Area); } #endif /* If one or both of the scaling vectors are impossibly large (producing a very large raw F value), we may as well not bother doing any form of resampling since the resampled area is very large. In this case some alternative means of pixel sampling, such as the average of the whole image, is needed to get a reasonable result. Calculate only as needed. */ if ( (4*A*C - B*B) > MagickMaximumValue ) { resample_filter->limit_reached = MagickTrue; return; } /* Scale ellipse to match the filter's support (that is, multiply F by the square of the support) Simpler to just multiply it by the support twice!
*/ F *= resample_filter->support; F *= resample_filter->support; /* Orthogonal bounds of the ellipse */ resample_filter->Ulimit = sqrt(C*F/(A*C-0.25*B*B)); resample_filter->Vlimit = sqrt(A*F/(A*C-0.25*B*B)); /* Horizontally aligned parallelogram fitted to Ellipse */ resample_filter->Uwidth = sqrt(F/A); /* Half of the parallelogram width */ resample_filter->slope = -B/(2.0*A); /* Reciprocal slope of the parallelogram */ #if DEBUG_ELLIPSE (void) FormatLocaleFile(stderr, "Ulimit=%lf; Vlimit=%lf; UWidth=%lf; Slope=%lf;\n", resample_filter->Ulimit, resample_filter->Vlimit, resample_filter->Uwidth, resample_filter->slope ); #endif /* Check the absolute area of the parallelogram involved. * This limit needs more work, as it is too slow for larger images * with tiled views of the horizon. */ if ( (resample_filter->Uwidth * resample_filter->Vlimit) > (4.0*resample_filter->image_area)) { resample_filter->limit_reached = MagickTrue; return; } /* Scale ellipse formula to directly index the Filter Lookup Table */ { register double scale; #if FILTER_LUT /* scale so that F = WLUT_WIDTH; -- hardcoded */ scale = (double)WLUT_WIDTH/F; #else /* scale so that F = resample_filter->F (support^2) */ scale = resample_filter->F/F; #endif resample_filter->A = A*scale; resample_filter->B = B*scale; resample_filter->C = C*scale; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t R e s a m p l e F i l t e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetResampleFilter() sets the resampling filter lookup table based on a % specific filter. Note that the filter is used as a radial filter, not as a % two-pass orthogonally aligned resampling filter. % % The format of the SetResampleFilter method is: % % void SetResampleFilter(ResampleFilter *resample_filter, % const FilterTypes filter,const double blur) % % A description of each parameter follows: % % o resample_filter: the resampling information structure % % o filter: the resize filter for the elliptical weighting LUT % % o blur: filter blur factor (radial scaling) for the elliptical weighting LUT % */ MagickExport void SetResampleFilter(ResampleFilter *resample_filter, const FilterTypes filter,const double blur) { ResizeFilter *resize_filter; assert(resample_filter != (ResampleFilter *) NULL); assert(resample_filter->signature == MagickCoreSignature); resample_filter->do_interpolate = MagickFalse; resample_filter->filter = filter; /* Default cylindrical filter is a Cubic Keys filter */ if ( filter == UndefinedFilter ) resample_filter->filter = RobidouxFilter; if ( resample_filter->filter == PointFilter ) { resample_filter->do_interpolate = MagickTrue; return; /* EWA turned off - nothing more to do */ } resize_filter = AcquireResizeFilter(resample_filter->image, resample_filter->filter,blur,MagickTrue,resample_filter->exception); if (resize_filter == (ResizeFilter *) NULL) { (void) ThrowMagickException(resample_filter->exception,GetMagickModule(), ModuleError, "UnableToSetFilteringValue", "Fall back to Interpolated 'Point' filter"); resample_filter->filter = PointFilter; resample_filter->do_interpolate = MagickTrue; return; /* EWA turned off - nothing more to do */ } /* Get the practical working support for the filter, * after any API call blur factors have been accounted for.
*/ #if EWA resample_filter->support = GetResizeFilterSupport(resize_filter); #else resample_filter->support = 2.0; /* fixed support size for HQ-EWA */ #endif #if FILTER_LUT /* Fill the LUT with the weights from the selected filter function */ { register int Q; double r_scale; /* Scale radius so the filter LUT covers the full support range */ r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH); for(Q=0; Q<WLUT_WIDTH; Q++) resample_filter->filter_lut[Q] = (double) GetResizeFilterWeight(resize_filter,sqrt((double)Q)*r_scale); /* finished with the resize filter */ resize_filter = DestroyResizeFilter(resize_filter); } #else /* save the filter and the scaled ellipse bounds needed for filter */ resample_filter->filter_def = resize_filter; resample_filter->F = resample_filter->support*resample_filter->support; #endif /* Adjust the scaling of the default unit circle This assumes that any real scaling changes will always take place AFTER the filter method has been initialized. */ ScaleResampleFilter(resample_filter, 1.0, 0.0, 0.0, 1.0); #if 0 /* This is old code kept as a reference only. Basically it generates a Gaussian bell curve, with sigma = 0.5 if the support is 2.0 Create Normal Gaussian 2D Filter Weighted Lookup Table. A normal EWA Gaussian lookup would use exp(Q*ALPHA) where Q = distance squared from 0.0 (center) to 1.0 (edge) and ALPHA = -4.0*ln(2.0) ==> -2.77258872223978123767 The table is of length 1024, and equates to a support radius of 2.0 thus needs to be scaled by ALPHA*4/1024 and any blur factor squared It comes from reference code provided by Fred Weinhaus. */ r_scale = -2.77258872223978123767/(WLUT_WIDTH*blur*blur); for(Q=0; Q<WLUT_WIDTH; Q++) resample_filter->filter_lut[Q] = exp((double)Q*r_scale); resample_filter->support = WLUT_WIDTH; #endif #if FILTER_LUT #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp single #endif { if (IsMagickTrue(GetImageArtifact(resample_filter->image, "resample:verbose")) ) { register int Q; double r_scale; /* Debug output of the filter weighting LUT Gnuplot the LUT data, the x scale index has been adjusted plot [0:2][-.2:1] "lut.dat" with lines The filter values should be normalized for comparison */ printf("#\n"); printf("# Resampling Filter LUT (%d values) for '%s' filter\n", WLUT_WIDTH, CommandOptionToMnemonic(MagickFilterOptions, resample_filter->filter) ); printf("#\n"); printf("# Note: values in table are using a squared radius lookup.\n"); printf("# As such its distribution is not uniform.\n"); printf("#\n"); printf("# The X value is the support distance for the Y weight\n"); printf("# so you can use gnuplot to plot this cylindrical filter\n"); printf("# plot [0:2][-.2:1] \"lut.dat\" with lines\n"); printf("#\n"); /* Scale radius so the filter LUT covers the full support range */ r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH); for(Q=0; Q<WLUT_WIDTH; Q++) printf("%8.*g %.*g\n", GetMagickPrecision(),sqrt((double)Q)*r_scale, GetMagickPrecision(),resample_filter->filter_lut[Q] ); printf("\n\n"); /* generate a 'break' in gnuplot if multiple outputs */ } /* Output the above once only for each image, and each setting (void) DeleteImageArtifact(resample_filter->image,"resample:verbose"); */ } #endif /* FILTER_LUT */ return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t R e s a m p l e F i l t e r I n t e r p o l a t e M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % %
SetResampleFilterInterpolateMethod() sets the resample filter interpolation % method. % % The format of the SetResampleFilterInterpolateMethod method is: % % MagickBooleanType SetResampleFilterInterpolateMethod( % ResampleFilter *resample_filter,const InterpolateMethod method) % % A description of each parameter follows: % % o resample_filter: the resample filter. % % o method: the interpolation method. % */ MagickExport MagickBooleanType SetResampleFilterInterpolateMethod( ResampleFilter *resample_filter,const InterpolatePixelMethod method) { assert(resample_filter != (ResampleFilter *) NULL); assert(resample_filter->signature == MagickCoreSignature); assert(resample_filter->image != (Image *) NULL); if (resample_filter->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", resample_filter->image->filename); resample_filter->interpolate=method; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t R e s a m p l e F i l t e r V i r t u a l P i x e l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetResampleFilterVirtualPixelMethod() changes the virtual pixel method % associated with the specified resample filter. % % The format of the SetResampleFilterVirtualPixelMethod method is: % % MagickBooleanType SetResampleFilterVirtualPixelMethod( % ResampleFilter *resample_filter,const VirtualPixelMethod method) % % A description of each parameter follows: % % o resample_filter: the resample filter. % % o method: the virtual pixel method. % */ MagickExport MagickBooleanType SetResampleFilterVirtualPixelMethod( ResampleFilter *resample_filter,const VirtualPixelMethod method) { assert(resample_filter != (ResampleFilter *) NULL); assert(resample_filter->signature == MagickCoreSignature); assert(resample_filter->image != (Image *) NULL); if (resample_filter->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", resample_filter->image->filename); resample_filter->virtual_pixel=method; if (method != UndefinedVirtualPixelMethod) (void) SetCacheViewVirtualPixelMethod(resample_filter->view,method); return(MagickTrue); }
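A minimal standalone sketch (not part of resample.c; the sample matrix values are made up for illustration) of the clamping math that ClampUpAxes() and ScaleResampleFilter() rely on: the singular values of a 2x2 inverse Jacobian are recovered from its normal matrix, exactly as in ClampUpAxes() above, and the EWA half-axis lengths are clamped up to 1.0 so the sampling ellipse never shrinks below the unit pixel disk.

#include <math.h>
#include <stdio.h>

int main(void)
{
  /* hypothetical inverse Jacobian of a magnifying distortion:
     both scaling vectors are shorter than one pixel */
  const double a = 0.4, b = 0.1,   /* dux, duy */
               c = 0.0, d = 0.3;   /* dvx, dvy */

  /* normal matrix n = Jinv * transpose(Jinv); its eigenvalues are the
     squares of the singular values of Jinv */
  const double n11 = a*a + b*b, n22 = c*c + d*d;
  const double frobenius_squared = n11 + n22;
  const double twice_det = 2.0*(a*d - b*c);
  double discriminant =
    (frobenius_squared + twice_det)*(frobenius_squared - twice_det);
  if (discriminant < 0.0)
    discriminant = 0.0;            /* guard against FP round-off */
  const double s1 = sqrt(0.5*(frobenius_squared + sqrt(discriminant)));
  const double s2 = sqrt(0.5*(frobenius_squared - sqrt(discriminant)));

  /* clamp the half-axis lengths up to 1 so the ellipse always
     contains the unit disk */
  const double major = (s1 < 1.0) ? 1.0 : s1;
  const double minor = (s2 < 1.0) ? 1.0 : s2;
  printf("singular values %.3f %.3f -> clamped axes %.3f %.3f\n",
         s1, s2, major, minor);
  return 0;
}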
soxr.c
/* SoX Resampler Library Copyright (c) 2007-13 robs@users.sourceforge.net * Licence for this file: LGPL v2.1 See LICENCE for details. */ #include <math.h> #include <stdlib.h> #include <string.h> #include <time.h> #include "soxr.h" #include "data-io.h" #include "internal.h" char const * soxr_version(void) { return "libsoxr-" SOXR_THIS_VERSION_STR; } typedef void sample_t; /* float or double */ typedef void (* fn_t)(void); typedef fn_t control_block_t[10]; #define resampler_input (*(sample_t * (*)(void *, sample_t * samples, size_t n))p->control_block[0]) #define resampler_process (*(void (*)(void *, size_t))p->control_block[1]) #define resampler_output (*(sample_t const * (*)(void *, sample_t * samples, size_t * n))p->control_block[2]) #define resampler_flush (*(void (*)(void *))p->control_block[3]) #define resampler_close (*(void (*)(void *))p->control_block[4]) #define resampler_delay (*(double (*)(void *))p->control_block[5]) #define resampler_sizes (*(void (*)(size_t * shared, size_t * channel))p->control_block[6]) #define resampler_create (*(char const * (*)(void * channel, void * shared, double io_ratio, soxr_quality_spec_t * q_spec, soxr_runtime_spec_t * r_spec, double scale))p->control_block[7]) #define resampler_set_io_ratio (*(void (*)(void *, double io_ratio, size_t len))p->control_block[8]) #define resampler_id (*(char const * (*)(void))p->control_block[9]) typedef void * resampler_t; /* For one channel. */ typedef void * resampler_shared_t; /* Between channels. */ typedef void (* deinterleave_t)(sample_t * * dest, soxr_datatype_t data_type, void const * * src0, size_t n, unsigned ch); typedef size_t (* interleave_t)(soxr_datatype_t data_type, void * * dest, sample_t const * const * src, size_t, unsigned, unsigned long *); struct soxr { unsigned num_channels; double io_ratio; soxr_error_t error; soxr_quality_spec_t q_spec; soxr_io_spec_t io_spec; soxr_runtime_spec_t runtime_spec; void * input_fn_state; soxr_input_fn_t input_fn; size_t max_ilen; resampler_shared_t shared; resampler_t * resamplers; control_block_t control_block; deinterleave_t deinterleave; interleave_t interleave; void * * channel_ptrs; size_t clips; unsigned long seed; int flushing; }; #define RESET_ON_CLEAR (1u<<31) /* TODO: these should not be here. */ #define TO_3dB(a) ((1.6e-6*a-7.5e-4)*a+.646) #define LOW_Q_BW0 (1385 / 2048.) /* 0.67625 rounded to be a FP exact. */ soxr_quality_spec_t soxr_quality_spec(unsigned long recipe, unsigned long flags) { soxr_quality_spec_t spec, * p = &spec; unsigned quality = recipe & 0xf; double rej; memset(p, 0, sizeof(*p)); if (quality > 13) { p->e = "invalid quality type"; return spec; } flags |= quality < SOXR_LSR0Q? RESET_ON_CLEAR : 0; if (quality == 13) quality = 6; else if (quality > 10) quality = 0; p->phase_response = "\62\31\144"[(recipe & 0x30) >> 4]; p->stopband_begin = 1; p->precision = !quality? 0: quality < 3? 16 : quality < 8? 4 + quality * 4 : 55 - quality * 4; rej = p->precision * linear_to_dB(2.); p->flags = flags; if (quality < 8) { p->passband_end = quality == 1? 
LOW_Q_BW0 : 1 - .05 / TO_3dB(rej); if (quality <= 2) p->flags &= ~SOXR_ROLLOFF_NONE, p->flags |= SOXR_ROLLOFF_MEDIUM; } else { static float const bw[] = {.931f, .832f, .663f}; p->passband_end = bw[quality - 8]; if (quality - 8 == 2) p->flags &= ~SOXR_ROLLOFF_NONE, p->flags |= SOXR_ROLLOFF_MEDIUM; } if (recipe & SOXR_STEEP_FILTER) p->passband_end = 1 - .01 / TO_3dB(rej); return spec; } char const * soxr_engine(soxr_t p) { return resampler_id(); } size_t * soxr_num_clips(soxr_t p) { return &p->clips; } soxr_error_t soxr_error(soxr_t p) { return p->error; } soxr_runtime_spec_t soxr_runtime_spec(unsigned num_threads) { soxr_runtime_spec_t spec, * p = &spec; memset(p, 0, sizeof(*p)); p->log2_min_dft_size = 10; p->log2_large_dft_size = 17; p->coef_size_kbytes = 400; p->num_threads = num_threads; return spec; } soxr_io_spec_t soxr_io_spec( soxr_datatype_t itype, soxr_datatype_t otype) { soxr_io_spec_t spec, * p = &spec; memset(p, 0, sizeof(*p)); if ((itype | otype) >= SOXR_SPLIT * 2) p->e = "invalid io datatype(s)"; else { p->itype = itype; p->otype = otype; p->scale = 1; } return spec; } #if HAVE_SIMD static bool cpu_has_simd(void) { #if defined __x86_64__ || defined _M_X64 return true; #elif defined __GNUC__ && defined i386 uint32_t eax, ebx, ecx, edx; __asm__ __volatile__ ( "pushl %%ebx \n\t" "cpuid \n\t" "movl %%ebx, %1\n\t" "popl %%ebx \n\t" : "=a"(eax), "=r"(ebx), "=c"(ecx), "=d"(edx) : "a"(1) : "cc" ); return !!(edx & 0x06000000); #elif defined _MSC_VER && defined _M_IX86 uint32_t d; __asm { xor eax, eax inc eax push ebx cpuid pop ebx mov d, edx } return !!(d & 0x06000000); #endif return false; } #endif extern control_block_t _soxr_rate32s_cb, _soxr_rate32_cb, _soxr_rate64_cb, _soxr_vr32_cb; soxr_t soxr_create( double input_rate, double output_rate, unsigned num_channels, soxr_error_t * error0, soxr_io_spec_t const * io_spec, soxr_quality_spec_t const * q_spec, soxr_runtime_spec_t const * runtime_spec) { double io_ratio = output_rate? input_rate? input_rate / output_rate : -1 : input_rate? -1 : 0; static const float datatype_full_scale[] = {1, 1, 65536.*32768, 32768}; soxr_t p = 0; soxr_error_t error = 0; if (q_spec && q_spec->e) error = q_spec->e; else if (io_spec && (io_spec->itype | io_spec->otype) >= SOXR_SPLIT * 2) error = "invalid io datatype(s)"; if (!error && !(p = calloc(sizeof(*p), 1))) error = "malloc failed"; if (p) { p->q_spec = q_spec? *q_spec : soxr_quality_spec(SOXR_HQ, 0); if (q_spec) { /* Backwards compatibility with original API: */ if (p->q_spec.passband_end > 2) p->q_spec.passband_end /= 100; if (p->q_spec.stopband_begin > 2) p->q_spec.stopband_begin = 2 - p->q_spec.stopband_begin / 100; } p->io_ratio = io_ratio; p->num_channels = num_channels; if (io_spec) p->io_spec = *io_spec; else p->io_spec.scale = 1; p->runtime_spec = runtime_spec? *runtime_spec : soxr_runtime_spec(1); p->io_spec.scale *= datatype_full_scale[p->io_spec.otype & 3] / datatype_full_scale[p->io_spec.itype & 3]; p->seed = (unsigned long)time(0) ^ (unsigned long)(size_t)p; #if HAVE_SINGLE_PRECISION if (!HAVE_DOUBLE_PRECISION || (p->q_spec.precision <= 20 && !(p->q_spec.flags & SOXR_DOUBLE_PRECISION)) || (p->q_spec.flags & SOXR_VR)) { p->deinterleave = (deinterleave_t)_soxr_deinterleave_f; p->interleave = (interleave_t)_soxr_interleave_f; memcpy(&p->control_block, (p->q_spec.flags & SOXR_VR)? &_soxr_vr32_cb : #if HAVE_SIMD cpu_has_simd()? 
&_soxr_rate32s_cb : #endif &_soxr_rate32_cb, sizeof(p->control_block)); } #if HAVE_DOUBLE_PRECISION else #endif #endif #if HAVE_DOUBLE_PRECISION { p->deinterleave = (deinterleave_t)_soxr_deinterleave; p->interleave = (interleave_t)_soxr_interleave; memcpy(&p->control_block, &_soxr_rate64_cb, sizeof(p->control_block)); } #endif if (p->num_channels && io_ratio) error = soxr_set_io_ratio(p, io_ratio, 0); } if (error) soxr_delete(p), p = 0; if (error0) *error0 = error; return p; } soxr_error_t soxr_set_input_fn(soxr_t p, soxr_input_fn_t input_fn, void * input_fn_state, size_t max_ilen) { p->input_fn_state = input_fn_state; p->input_fn = input_fn; p->max_ilen = max_ilen? max_ilen : (size_t)-1; return 0; } static void soxr_delete0(soxr_t p) { unsigned i; if (p->resamplers) for (i = 0; i < p->num_channels; ++i) { if (p->resamplers[i]) resampler_close(p->resamplers[i]); free(p->resamplers[i]); } free(p->resamplers); free(p->channel_ptrs); free(p->shared); memset(p, 0, sizeof(*p)); } double soxr_delay(soxr_t p) { return (p && !p->error && p->resamplers)? resampler_delay(p->resamplers[0]) : 0; } static soxr_error_t fatal_error(soxr_t p, soxr_error_t error) { soxr_delete0(p); return p->error = error; } static soxr_error_t initialise(soxr_t p) { unsigned i; size_t shared_size, channel_size; resampler_sizes(&shared_size, &channel_size); p->channel_ptrs = calloc(sizeof(*p->channel_ptrs), p->num_channels); p->shared = calloc(shared_size, 1); p->resamplers = calloc(sizeof(*p->resamplers), p->num_channels); if (!p->shared || !p->channel_ptrs || !p->resamplers) return fatal_error(p, "malloc failed"); for (i = 0; i < p->num_channels; ++i) { soxr_error_t error; if (!(p->resamplers[i] = calloc(channel_size, 1))) return fatal_error(p, "malloc failed"); error = resampler_create( p->resamplers[i], p->shared, p->io_ratio, &p->q_spec, &p->runtime_spec, p->io_spec.scale); if (error) return fatal_error(p, error); } return 0; } soxr_error_t soxr_set_num_channels(soxr_t p, unsigned num_channels) { if (!p) return "invalid soxr_t pointer"; if (num_channels == p->num_channels) return p->error; if (!num_channels) return "invalid # of channels"; if (p->resamplers) return "# of channels can't be changed"; p->num_channels = num_channels; return soxr_set_io_ratio(p, p->io_ratio, 0); } soxr_error_t soxr_set_io_ratio(soxr_t p, double io_ratio, size_t slew_len) { unsigned i; soxr_error_t error; if (!p) return "invalid soxr_t pointer"; if ((error = p->error)) return error; if (!p->num_channels) return "must set # channels before O/I ratio"; if (io_ratio <= 0) return "I/O ratio out-of-range"; if (!p->channel_ptrs) { p->io_ratio = io_ratio; return initialise(p); } if (p->control_block[8]) { for (i = 0; !error && i < p->num_channels; ++i) resampler_set_io_ratio(p->resamplers[i], io_ratio, slew_len); return error; } return fabs(p->io_ratio - io_ratio) < 1e-15? 0 : "Varying O/I ratio is not supported with this quality level"; } void soxr_delete(soxr_t p) { if (p) soxr_delete0(p), free(p); } soxr_error_t soxr_clear(soxr_t p) /* TODO: this, properly. */ { if (p) { struct soxr tmp = *p; soxr_delete0(p); memset(p, 0, sizeof(*p)); p->input_fn = tmp.input_fn; p->runtime_spec = tmp.runtime_spec; p->q_spec = tmp.q_spec; p->io_spec = tmp.io_spec; p->num_channels = tmp.num_channels; p->input_fn_state = tmp.input_fn_state; memcpy(p->control_block, tmp.control_block, sizeof(p->control_block)); p->deinterleave = tmp.deinterleave; p->interleave = tmp.interleave; return (p->q_spec.flags & RESET_ON_CLEAR)? 
soxr_set_io_ratio(p, tmp.io_ratio, 0) : 0; } return "invalid soxr_t pointer"; } static void soxr_input_1ch(soxr_t p, unsigned i, soxr_cbuf_t src, size_t len) { sample_t * dest = resampler_input(p->resamplers[i], NULL, len); (*p->deinterleave)(&dest, p->io_spec.itype, &src, len, 1); } static size_t soxr_input(soxr_t p, void const * in, size_t len) { bool separated = !!(p->io_spec.itype & SOXR_SPLIT); unsigned i; if (!p || p->error) return 0; if (!in && len) {p->error = "null input buffer pointer"; return 0;} if (!len) { p->flushing = true; return 0; } if (separated) for (i = 0; i < p->num_channels; ++i) soxr_input_1ch(p, i, ((soxr_cbufs_t)in)[i], len); else { for (i = 0; i < p->num_channels; ++i) p->channel_ptrs[i] = resampler_input(p->resamplers[i], NULL, len); (*p->deinterleave)( (sample_t **)p->channel_ptrs, p->io_spec.itype, &in, len, p->num_channels); } return len; } static size_t soxr_output_1ch(soxr_t p, unsigned i, soxr_buf_t dest, size_t len, bool separated) { sample_t const * src; if (p->flushing) resampler_flush(p->resamplers[i]); resampler_process(p->resamplers[i], len); src = resampler_output(p->resamplers[i], NULL, &len); if (separated) p->clips += (p->interleave)(p->io_spec.otype, &dest, &src, len, 1, (p->io_spec.flags & SOXR_NO_DITHER)? 0 : &p->seed); else p->channel_ptrs[i] = (void /* const */ *)src; return len; } static size_t soxr_output_no_callback(soxr_t p, soxr_buf_t out, size_t len) { unsigned u; size_t done = 0; bool separated = !!(p->io_spec.otype & SOXR_SPLIT); #if defined _OPENMP int i; if (!p->runtime_spec.num_threads && p->num_channels > 1) #pragma omp parallel for for (i = 0; i < (int)p->num_channels; ++i) { size_t done1; done1 = soxr_output_1ch(p, (unsigned)i, ((soxr_bufs_t)out)[i], len, separated); if (!i) done = done1; } else #endif { if (p->num_channels > 1) { for (u = 0; u < p->num_channels; ++u) done = soxr_output_1ch(p, u, ((soxr_bufs_t)out)[u], len, separated); } else done = soxr_output_1ch(p, 0, out, len, separated); } if (!separated) p->clips += (p->interleave)(p->io_spec.otype, &out, (sample_t const * const *)p->channel_ptrs, done, p->num_channels, (p->io_spec.flags & SOXR_NO_DITHER)? 0 : &p->seed); return done; } size_t soxr_output(soxr_t p, void * out, size_t len0) { size_t odone, odone0 = 0, olen = len0, osize, idone; size_t ilen = min(p->max_ilen, (size_t)ceil((double)olen *p->io_ratio)); void const * in = out; /* Set to !=0, so that caller may leave unset. 
*/ bool was_flushing; if (!p || p->error) return 0; if (!out && len0) {p->error = "null output buffer pointer"; return 0;} do { odone = soxr_output_no_callback(p, out, olen); odone0 += odone; if (odone0 == len0 || !p->input_fn || p->flushing) break; osize = soxr_datatype_size(p->io_spec.otype) * p->num_channels; out = (char *)out + osize * odone; olen -= odone; idone = p->input_fn(p->input_fn_state, &in, ilen); was_flushing = p->flushing; if (!in) p->error = "input function reported failure"; else soxr_input(p, in, idone); } while (odone || idone || (!was_flushing && p->flushing)); return odone0; } static size_t soxr_i_for_o(soxr_t p, size_t olen, size_t ilen) { size_t result; #if 0 if (p->runtime_spec.flags & SOXR_STRICT_BUFFERING) result = rate_i_for_o(p->resamplers[0], olen); else #endif result = (size_t)ceil((double)olen * p->io_ratio); return min(result, ilen); } #if 0 static size_t soxr_o_for_i(soxr_t p, size_t ilen, size_t olen) { size_t result = (size_t)ceil((double)ilen / p->io_ratio); return min(result, olen); } #endif soxr_error_t soxr_process(soxr_t p, void const * in , size_t ilen0, size_t * idone0, void * out, size_t olen , size_t * odone0) { size_t ilen, idone, odone = 0; unsigned u; bool flush_requested = false; if (!p) return "null pointer"; if (!in) flush_requested = true, ilen = ilen0 = 0; else { if ((ptrdiff_t)ilen0 < 0) flush_requested = true, ilen0 = ~ilen0; if (idone0 && (1 || flush_requested)) ilen = soxr_i_for_o(p, olen, ilen0); else ilen = ilen0/*, olen = soxr_o_for_i(p, ilen, olen)*/; } p->flushing |= ilen == ilen0 && flush_requested; if (!out && !in) idone = ilen; else if (p->io_spec.itype & p->io_spec.otype & SOXR_SPLIT) { /* Both i & o */ #if defined _OPENMP int i; if (!p->runtime_spec.num_threads && p->num_channels > 1) #pragma omp parallel for for (i = 0; i < (int)p->num_channels; ++i) { size_t done; if (in) soxr_input_1ch(p, (unsigned)i, ((soxr_cbufs_t)in)[i], ilen); done = soxr_output_1ch(p, (unsigned)i, ((soxr_bufs_t)out)[i], olen, true); if (!i) odone = done; } else #endif for (u = 0; u < p->num_channels; ++u) { if (in) soxr_input_1ch(p, u, ((soxr_cbufs_t)in)[u], ilen); odone = soxr_output_1ch(p, u, ((soxr_bufs_t)out)[u], olen, true); } idone = ilen; } else { idone = ilen? soxr_input (p, in , ilen) : 0; odone = soxr_output(p, out, olen); } if (idone0) *idone0 = idone; if (odone0) *odone0 = odone; return p->error; } soxr_error_t soxr_oneshot( double irate, double orate, unsigned num_channels, void const * in , size_t ilen, size_t * idone, void * out, size_t olen, size_t * odone, soxr_io_spec_t const * io_spec, soxr_quality_spec_t const * q_spec, soxr_runtime_spec_t const * runtime_spec) { soxr_t resampler = NULL; soxr_error_t error = q_spec? q_spec->e : 0; if (!error) { soxr_quality_spec_t q_spec1; if (!q_spec) q_spec1 = soxr_quality_spec(SOXR_LQ, 0), q_spec = &q_spec1; resampler = soxr_create(irate, orate, num_channels, &error, io_spec, q_spec, runtime_spec); } if (!error) { error = soxr_process(resampler, in, ~ilen, idone, out, olen, odone); soxr_delete(resampler); } return error; } soxr_error_t soxr_set_error(soxr_t p, soxr_error_t error) { if (!p) return "null pointer"; if (!p->error && p->error != error) return p->error; p->error = error; return 0; }
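A usage sketch, not part of libsoxr itself: it drives the soxr_oneshot() entry point defined above with the defaults the source falls back to when the three spec pointers are NULL (the SOXR_LQ one-shot recipe, scale 1, and soxr.h's default float32 interleaved I/O type). The rates, channel count, and buffer sizes are arbitrary illustration values.

#include <stdio.h>
#include "soxr.h"

int main(void)
{
  static float in[48000];          /* one second of silence at 48 kHz */
  static float out[44100];         /* room for one second at 44.1 kHz */
  size_t idone = 0, odone = 0;     /* samples consumed / produced */

  soxr_error_t error = soxr_oneshot(
      48000, 44100, 1,             /* input rate, output rate, channels */
      in,  48000, &idone,          /* input buffer and its length       */
      out, 44100, &odone,          /* output buffer and its capacity    */
      NULL, NULL, NULL);           /* default io/quality/runtime specs  */

  printf("%s: consumed %zu samples, produced %zu samples\n",
         error ? (char const *)error : "ok", idone, odone);
  return error ? 1 : 0;
}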
cryptocontext.h
/** * @file cryptocontext.h -- Control for encryption operations. * @author TPOC: palisade@njit.edu * * @section LICENSE * * Copyright (c) 2017, New Jersey Institute of Technology (NJIT) * All rights reserved. * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, this * list of conditions and the following disclaimer in the documentation and/or other * materials provided with the distribution. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SRC_DEMO_PRE_CRYPTOCONTEXT_H_ #define SRC_DEMO_PRE_CRYPTOCONTEXT_H_ #include "palisade.h" #include "encoding/plaintext.h" #include "encoding/byteplaintextencoding.h" #include "encoding/intplaintextencoding.h" #include "encoding/packedintplaintextencoding.h" #include "cryptocontexthelper.h" #include "cryptotiming.h" namespace lbcrypto { template<typename Element> class CryptoContextFactory; /** * @brief CryptoContext * * A CryptoContext is the object used to access the PALISADE library * * All PALISADE functionality is accessed by way of an instance of a CryptoContext; we say that various objects are * "created in" a context, and can only be used in the context in which they were created * * All PALISADE methods are accessed through CryptoContext methods. 
Guards are implemented to make certain that * only valid objects that have been created in the context are used * * Contexts are created using the CryptoContextFactory, and can be serialized and recovered from a serialization */ template<typename Element> class CryptoContext : public Serializable { friend class CryptoContextFactory<Element>; private: shared_ptr<LPCryptoParameters<Element>> params; /*!< crypto parameters used for this context */ shared_ptr<LPPublicKeyEncryptionScheme<Element>> scheme; /*!< algorithm used; accesses all crypto methods */ vector<shared_ptr<LPEvalKey<Element>>> evalMultKeys; /*!< cached evalmult keys */ std::map<usint, shared_ptr<LPEvalKey<Element>>> evalSumKeys; /*!< cached evalsum keys */ bool doTiming; vector<TimingInfo>* timeSamples; /** * Private methods to compare two contexts; this is only used internally and is not generally available * @param a - shared pointer in the object * @param b - this object, usually * @return true if the shared pointer is a pointer to "this" */ friend bool operator==(const CryptoContext<Element>& a, const CryptoContext<Element>& b) { if( a.params.get() != b.params.get() ) return false; if( a.evalMultKeys.size() != b.evalMultKeys.size() ) return false; for( size_t i=0; i<a.evalMultKeys.size(); i++ ) if( a.evalMultKeys[i].get() != b.evalMultKeys[i].get() ) return false; if( a.evalSumKeys.size() != b.evalSumKeys.size() ) return false; for (const auto& kp : a.evalSumKeys) { const auto& vb = b.evalSumKeys.find(kp.first); if( vb == b.evalSumKeys.end() ) return false; // key in a not in b if( *kp.second != *vb->second ) return false; // mismatch } return true; } friend bool operator!=(const CryptoContext<Element>& a, const CryptoContext<Element>& b) { return !( a == b ); } public: /** * CryptoContext constructor from pointers to parameters and scheme * @param params - pointer to CryptoParameters * @param scheme - pointer to Crypto Scheme */ CryptoContext(LPCryptoParameters<Element> *params = 0, LPPublicKeyEncryptionScheme<Element> *scheme = 0) { this->params.reset(params); this->scheme.reset(scheme); this->doTiming = false; this->timeSamples = 0; } /** * CryptoContext constructor from shared pointers to parameters and scheme * @param params - shared pointer to CryptoParameters * @param scheme - sharedpointer to Crypto Scheme */ CryptoContext(shared_ptr<LPCryptoParameters<Element>> params, shared_ptr<LPPublicKeyEncryptionScheme<Element>> scheme) { this->params = params; this->scheme = scheme; this->doTiming = false; this->timeSamples = 0; } /** * Copy constructor * @param c - source */ CryptoContext(const CryptoContext<Element>& c) { params = c.params; scheme = c.scheme; doTiming = c.doTiming; timeSamples = c.timeSamples; evalMultKeys = c.evalMultKeys; evalSumKeys = c.evalSumKeys; } /** * Assignment * @param rhs - assigning from * @return this */ CryptoContext<Element>& operator=(const CryptoContext<Element>& rhs) { params = rhs.params; scheme = rhs.scheme; doTiming = rhs.doTiming; timeSamples = rhs.timeSamples; evalMultKeys = rhs.evalMultKeys; evalSumKeys = rhs.evalSumKeys; return *this; } /** * A CryptoContext is only valid if the shared pointers are both valid */ operator bool() const { return bool(params) && bool(scheme); } /** * StartTiming method activates timing of CryptoMethods * * @param timeSamples points to a vector in which timing samples will be stored */ void StartTiming(vector<TimingInfo>* timeSamples) { this->timeSamples = timeSamples; doTiming = true; } /* * StopTiming - turns off timing */ void StopTiming() { 
doTiming = false; } /** * ResumeTiming - re-enables timing with existing TimingInfo vector */ void ResumeTiming() { doTiming = true; } /** * ResetTiming - erases measurements */ void ResetTiming() { this->timeSamples->clear(); } /** * Serialize the CryptoContext * * @param serObj - rapidJson object for the serializaion * @return true on success */ bool Serialize(Serialized* serObj) const; /** * Deserialize the context AND initialize the algorithm * * @param serObj * @return true on success */ bool Deserialize(const Serialized& serObj) { throw std::logic_error("Deserialize by using CryptoContextFactory::DeserializeAndCreateContext"); } /** * Enable a particular feature for use with this CryptoContext * @param feature - the feature that should be enabled */ void Enable(PKESchemeFeature feature) { scheme->Enable(feature); } /** * Enable several features at once * @param featureMask - bitwise or of several PKESchemeFeatures */ void Enable(usint featureMask) { scheme->Enable(featureMask); } /** * Getter for Scheme * @return scheme */ const shared_ptr<LPPublicKeyEncryptionScheme<Element>> GetEncryptionAlgorithm() const { return scheme; } /** * Getter for CryptoParams * @return params */ const shared_ptr<LPCryptoParameters<Element>> GetCryptoParameters() const { return params; } /** * Getter for element params * @return */ const shared_ptr<typename Element::Params> GetElementParams() const { return params->GetElementParams(); } /** * Get the cyclotomic order used for this context * * @return */ const usint GetCyclotomicOrder() const { return params->GetElementParams()->GetCyclotomicOrder(); } /** * Get the ring dimension used for this context * * @return */ const usint GetRingDimension() const { return params->GetElementParams()->GetRingDimension(); } /** * Get the ciphertext modulus used for this context * * @return */ const BigInteger& GetModulus() const { return params->GetElementParams()->GetModulus(); } /** * Get the ciphertext modulus used for this context * * @return */ const BigInteger& GetRootOfUnity() const { return params->GetElementParams()->GetRootOfUnity(); } /** * KeyGen generates a key pair using this algorithm's KeyGen method * @return a public/secret key pair */ LPKeyPair<Element> KeyGen() { double start = 0; if( doTiming ) start = currentDateTime(); auto r = GetEncryptionAlgorithm()->KeyGen(this, false); if( doTiming ) { timeSamples->push_back( TimingInfo(OpKeyGen, currentDateTime() - start) ); } return r; } /** * KeyGen generates a Multiparty key pair using this algorithm's KeyGen method from two keys * @param pk first public key used to coordinate the creation of later public keys. * @return a public/secret key pair */ LPKeyPair<Element> MultipartyKeyGen( const shared_ptr<LPPublicKey<Element>> pk) { double start = 0; if( doTiming ) start = currentDateTime(); auto r = GetEncryptionAlgorithm()->MultipartyKeyGen(this, pk, false); if( doTiming ) { timeSamples->push_back( TimingInfo(OpMultiPartyKeyGenKey, currentDateTime() - start) ); } return r; } /** * KeyGen generates a Multiparty key pair using a vector of secret keys * @param secretKeys a vector of the secret keys to be used for multiparty computation. 
* @return a public/secret key pair */ LPKeyPair<Element> MultipartyKeyGen( const vector<shared_ptr<LPPrivateKey<Element>>>& secretKeys) { double start = 0; if( doTiming ) start = currentDateTime(); auto r = GetEncryptionAlgorithm()->MultipartyKeyGen(this, secretKeys, false); if( doTiming ) { timeSamples->push_back( TimingInfo(OpMultiPartyKeyGenKeyvec, currentDateTime() - start) ); } return r; } /** * Lead Multiparty Decryption method for PALISADE multiparty operations. * This should be performed by exactly one of the clients. * All other clients should perform the MultipartyDecryptMain operation. * @param privateKey the secret key of the lead decryption client * @param ciphertext vector of encrypted ciphertext * @return vector of partially decrypted ciphertexts */ std::vector<shared_ptr<Ciphertext<Element>>> MultipartyDecryptLead( const shared_ptr<LPPrivateKey<Element>> privateKey, const std::vector<shared_ptr<Ciphertext<Element>>>& ciphertext) const { if( privateKey == NULL || privateKey->GetCryptoContext() != this ) throw std::logic_error("Information passed to MultipartyDecryptLead was not generated with this crypto context"); std::vector<shared_ptr<Ciphertext<Element>>> newCiphertext; double start = 0; if( doTiming ) start = currentDateTime(); for( size_t i=0; i < ciphertext.size(); i++ ) { if( ciphertext[i] == NULL || ciphertext[i]->GetCryptoContext() != this ) throw std::logic_error("One of the ciphertexts passed to MultipartyDecryptLead was not generated with this crypto context"); newCiphertext.push_back( GetEncryptionAlgorithm()->MultipartyDecryptLead(privateKey, ciphertext[i]) ); } if( doTiming ) { timeSamples->push_back( TimingInfo(OpMultiPartyDecryptLead, currentDateTime() - start) ); } return newCiphertext; } /** * Multiparty decryption method for PALISADE multiparty operations. * The lead multiparty decryption operation should be performed by exactly one of the clients. * All other clients should perform this MultipartyDecryptMain operation. * @param privateKey - for decryption * @param ciphertext - vector of encrypted ciphertext * @return vector of partially decrypted ciphertexts */ std::vector<shared_ptr<Ciphertext<Element>>> MultipartyDecryptMain( const shared_ptr<LPPrivateKey<Element>> privateKey, const std::vector<shared_ptr<Ciphertext<Element>>>& ciphertext) const { if( privateKey == NULL || privateKey->GetCryptoContext() != this ) throw std::logic_error("Information passed to MultipartyDecryptMain was not generated with this crypto context"); std::vector<shared_ptr<Ciphertext<Element>>> newCiphertext; double start = 0; if( doTiming ) start = currentDateTime(); for( size_t i=0; i < ciphertext.size(); i++ ) { if( ciphertext[i] == NULL || ciphertext[i]->GetCryptoContext() != this ) throw std::logic_error("One of the ciphertexts passed to MultipartyDecryptMain was not generated with this crypto context"); newCiphertext.push_back( GetEncryptionAlgorithm()->MultipartyDecryptMain(privateKey, ciphertext[i]) ); } if( doTiming ) { timeSamples->push_back( TimingInfo(OpMultiPartyDecryptMain, currentDateTime() - start) ); } return newCiphertext; } /** * Final multiparty decryption method to fuse the partially decrypted ciphertexts into a decrypted plaintext. * The lead multiparty decryption operation should be performed by exactly one of the clients. * All other clients should perform the MultipartyDecryptMain operation. * @param partialCiphertextVec - vector of partially decrypted ciphertexts. 
* @param plaintext - pointer to destination for the result of decryption * @param doPadding - true if input plaintext was padded; causes unpadding on last piece of ciphertext * @return size of plaintext */ DecryptResult MultipartyDecryptFusion( const std::vector<vector<shared_ptr<Ciphertext<Element>>>>& partialCiphertextVec, Plaintext *plaintext, bool doPadding = true) const { //Make sure we're processing ciphertexts. size_t last_ciphertext = partialCiphertextVec.size(); if (last_ciphertext < 1 ) return DecryptResult(); //Make sure ciphertexts are of non-zero length and that they'r eof the same length/ size_t ciphertext_size = partialCiphertextVec[0].size(); for( size_t i = 0; i < last_ciphertext; i++ ) { std::vector<shared_ptr<Ciphertext<Element>>> ciphertext = partialCiphertextVec[i]; // edge case if (ciphertext.size() == 0 || ciphertext.size() != ciphertext_size) return DecryptResult(); } double start = 0; if( doTiming ) start = currentDateTime(); size_t lastone = partialCiphertextVec[0].size() - 1; for( size_t ch = 0; ch < ciphertext_size; ch++ ) { vector<shared_ptr<Ciphertext<Element>>> ciphertextVec; for( size_t i = 0; i < last_ciphertext; i++ ) { std::vector<shared_ptr<Ciphertext<Element>>> ciphertext = partialCiphertextVec[i]; // edge case if (ciphertext[ch] == NULL || ciphertext[ch]->GetCryptoContext() != this) throw std::logic_error("A ciphertext passed to MultipartyDecryptFusion was not generated with this crypto context"); ciphertextVec.push_back(ciphertext[ch]); } Poly decrypted; DecryptResult result = GetEncryptionAlgorithm()->MultipartyDecryptFusion(ciphertextVec, &decrypted); if (result.isValid == false) return result; plaintext->Decode(ciphertextVec[0]->GetCryptoParameters()->GetPlaintextModulus(), &decrypted); if (ch == lastone && doPadding) { plaintext->Unpad(ciphertextVec[0]->GetCryptoParameters()->GetPlaintextModulus()); } } if( doTiming ) { timeSamples->push_back( TimingInfo(OpMultiPartyDecryptFusion, currentDateTime() - start) ); } return DecryptResult(plaintext->GetLength()); } /** * SparseKeyGen generates a key pair with special structure, and without full entropy, * for use in special cases like Ring Reduction * @return a public/secret key pair */ LPKeyPair<Element> SparseKeyGen() { double start = 0; if( doTiming ) start = currentDateTime(); auto r = GetEncryptionAlgorithm()->KeyGen(this, true); if( doTiming ) { timeSamples->push_back( TimingInfo(OpSparseKeyGen, currentDateTime() - start) ); } return r; } /** * ReKeyGen produces an Eval Key that PALISADE can use for Proxy Re Encryption * @param newKey (public) * @param oldKey (private) * @return new evaluation key */ shared_ptr<LPEvalKey<Element>> ReKeyGen( const shared_ptr<LPPublicKey<Element>> newKey, const shared_ptr<LPPrivateKey<Element>> oldKey) const { if( newKey == NULL || oldKey == NULL || newKey->GetCryptoContext() != this || oldKey->GetCryptoContext() != this ) throw std::logic_error("Keys passed to ReKeyGen were not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto r = GetEncryptionAlgorithm()->ReKeyGen(newKey, oldKey); if( doTiming ) { timeSamples->push_back( TimingInfo(OpReKeyGenPubPri, currentDateTime() - start) ); } return r; } /** * ReKeyGen produces an Eval Key that PALISADE can use for Proxy Re Encryption * @param newKey (private) * @param oldKey (private) * @return new evaluation key */ shared_ptr<LPEvalKey<Element>> ReKeyGen( const shared_ptr<LPPrivateKey<Element>> newKey, const shared_ptr<LPPrivateKey<Element>> oldKey) const { if (newKey == 
NULL || oldKey == NULL || newKey->GetCryptoContext() != this || oldKey->GetCryptoContext() != this) throw std::logic_error("Keys passed to ReKeyGen were not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto r = GetEncryptionAlgorithm()->ReKeyGen(newKey, oldKey); if( doTiming ) { timeSamples->push_back( TimingInfo(OpReKeyGenPriPri, currentDateTime() - start) ); } return r; } /** * EvalMultKeyGen creates a key that can be used with the PALISADE EvalMult operator * @param key * @return new evaluation key */ void EvalMultKeyGen(const shared_ptr<LPPrivateKey<Element>> key); /** * GetEvalMultKey fetches the cached eval mult keys * * @return the key to use */ const shared_ptr<LPEvalKey<Element>> GetEvalMultKey() const { if( evalMultKeys.size() != 1 ) throw std::logic_error("You need to use EvalMultKeyGen so that you have an EvalMultKey available"); return evalMultKeys[0]; } /** * SetEvalMultKeys is used by the deserializer to initialize the keyset for EvalSum * FIXME should be private? * @param evalMultKeys - new key map */ void SetEvalMultKeys(vector<shared_ptr<LPEvalKey<Element>>>& evalMultKeysNew) { evalMultKeys.clear(); this->evalMultKeys = evalMultKeysNew; } /** * KeySwitchGen creates a key that can be used with the PALISADE KeySwitch operation * @param key1 * @param key2 * @return new evaluation key */ shared_ptr<LPEvalKey<Element>> KeySwitchGen( const shared_ptr<LPPrivateKey<Element>> key1, const shared_ptr<LPPrivateKey<Element>> key2) const { if( key1 == NULL || key2 == NULL || key1->GetCryptoContext() != this || key2->GetCryptoContext() != this ) throw std::logic_error("Keys passed to KeySwitchGen were not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto r = GetEncryptionAlgorithm()->KeySwitchGen(key1, key2); if( doTiming ) { timeSamples->push_back( TimingInfo(OpKeySwitchGen, currentDateTime() - start) ); } return r; } /** * Encrypt method for PALISADE * @param publicKey - for encryption * @param plaintext - to encrypt * @param doPadding - if true, pad the input out to fill the encrypted chunk * @param doEncryption encrypts if true, embeds (encodes) the plaintext into cryptocontext if false * @return a vector of pointers to Ciphertexts created by encrypting the plaintext */ std::vector<shared_ptr<Ciphertext<Element>>> Encrypt( const shared_ptr<LPPublicKey<Element>> publicKey, const Plaintext& plaintext, bool doPadding = true, bool doEncryption = true) const { std::vector<shared_ptr<Ciphertext<Element>>> cipherResults; if( publicKey == NULL || publicKey->GetCryptoContext() != this ) throw std::logic_error("key passed to Encrypt was not generated with this crypto context"); const BigInteger& ptm = publicKey->GetCryptoParameters()->GetPlaintextModulus(); size_t chunkSize = plaintext.GetChunksize(publicKey->GetCryptoContext()->GetRingDimension(), ptm); size_t ptSize = plaintext.GetLength(); size_t rounds = ptSize / chunkSize; if (doPadding == false && ptSize%chunkSize != 0 && typeid(plaintext) == typeid(BytePlaintextEncoding)) { throw std::logic_error("Cannot Encrypt without padding with chunksize " + std::to_string(chunkSize) + " and plaintext size " + std::to_string(ptSize)); } // if there is a partial chunk OR if there isn't but we need to pad if (ptSize%chunkSize != 0 || doPadding == true) rounds += 1; double start = 0; if( doTiming ) start = currentDateTime(); for (size_t bytes = 0, i = 0; i < rounds; bytes += chunkSize, i++) { Poly 
pt(publicKey->GetCryptoParameters()->GetElementParams()); plaintext.Encode(ptm, &pt, bytes, chunkSize); shared_ptr<Ciphertext<Element>> ciphertext = GetEncryptionAlgorithm()->Encrypt(publicKey, pt, doEncryption); if (!ciphertext) { cipherResults.clear(); break; } cipherResults.push_back(ciphertext); } if( doTiming ) { timeSamples->push_back( TimingInfo(OpEncrypt, currentDateTime() - start) ); } return cipherResults; } /** * Encrypt a matrix of plaintexts (integer encoding) * @param publicKey - for encryption * @param plaintext - to encrypt * @param doEncryption encrypts if true, embeds (encodes) the plaintext into cryptocontext if false * @return a vector of pointers to Ciphertexts created by encrypting the plaintext */ shared_ptr<Matrix<RationalCiphertext<Element>>> EncryptMatrix( const shared_ptr<LPPublicKey<Element>> publicKey, const Matrix<IntPlaintextEncoding> &plaintext, bool doEncryption = true) { auto zeroAlloc = [=]() { return make_unique<RationalCiphertext<Element>>(this, true); }; shared_ptr<Matrix<RationalCiphertext<Element>>> cipherResults(new Matrix<RationalCiphertext<Element>> (zeroAlloc, plaintext.GetRows(), plaintext.GetCols())); if (publicKey == NULL || publicKey->GetCryptoContext() != this) throw std::logic_error("key passed to EncryptMatrix was not generated with this crypto context"); const BigInteger& ptm = publicKey->GetCryptoParameters()->GetPlaintextModulus(); double start = 0; if( doTiming ) start = currentDateTime(); for (size_t row = 0; row < plaintext.GetRows(); row++) { for (size_t col = 0; col < plaintext.GetCols(); col++) { Poly pt(publicKey->GetCryptoParameters()->GetElementParams()); plaintext(row,col).Encode(ptm, &pt); shared_ptr<Ciphertext<Element>> ciphertext = GetEncryptionAlgorithm()->Encrypt(publicKey, pt, doEncryption); (*cipherResults)(row, col).SetNumerator(*ciphertext); } } if( doTiming ) { timeSamples->push_back( TimingInfo(OpEncryptMatrixPlain, currentDateTime() - start) ); } return cipherResults; } /** * Encrypt a matrix of plaintexts (packed encoding) * @param publicKey - for encryption * @param plaintext - to encrypt * @param doEncryption encrypts if true, embeds (encodes) the plaintext into cryptocontext if false * @return a vector of pointers to Ciphertexts created by encrypting the plaintext */ shared_ptr<Matrix<RationalCiphertext<Element>>> EncryptMatrix( const shared_ptr<LPPublicKey<Element>> publicKey, const Matrix<PackedIntPlaintextEncoding> &plaintext, bool doEncryption = true) { auto zeroAlloc = [=]() { return make_unique<RationalCiphertext<Element>>(this, true); }; shared_ptr<Matrix<RationalCiphertext<Element>>> cipherResults(new Matrix<RationalCiphertext<Element>> (zeroAlloc, plaintext.GetRows(), plaintext.GetCols())); if (publicKey == NULL || publicKey->GetCryptoContext() != this) throw std::logic_error("key passed to EncryptMatrix was not generated with this crypto context"); const BigInteger& ptm = publicKey->GetCryptoParameters()->GetPlaintextModulus(); double start = 0; if( doTiming ) start = currentDateTime(); for (size_t row = 0; row < plaintext.GetRows(); row++) { for (size_t col = 0; col < plaintext.GetCols(); col++) { Poly pt(publicKey->GetCryptoParameters()->GetElementParams()); plaintext(row, col).Encode(ptm, &pt); shared_ptr<Ciphertext<Element>> ciphertext = GetEncryptionAlgorithm()->Encrypt(publicKey, pt, doEncryption); (*cipherResults)(row, col).SetNumerator(*ciphertext); } } if( doTiming ) { timeSamples->push_back( TimingInfo(OpEncryptMatrixPacked, currentDateTime() - start) ); } return cipherResults; } /** * 
Perform an encryption by reading plaintext from a stream, serializing each piece of ciphertext, * and writing the serializations to an output stream * @param publicKey - the encryption key in use * @param instream - where to read the input from * @param ostream - where to write the serialization to * @param doEncryption encrypts if true, embeds (encodes) the plaintext into cryptocontext if false * @return */ void EncryptStream( const shared_ptr<LPPublicKey<Element>> publicKey, std::istream& instream, std::ostream& outstream, bool doEncryption = true) const { // NOTE timing this operation is not supported if( publicKey == NULL || publicKey->GetCryptoContext() != this ) throw std::logic_error("key passed to EncryptStream was not generated with this crypto context"); bool padded = false; BytePlaintextEncoding px; const BigInteger& ptm = publicKey->GetCryptoContext()->GetCryptoParameters()->GetPlaintextModulus(); size_t chunkSize = px.GetChunksize(publicKey->GetCryptoContext()->GetRingDimension(), ptm); char *ptxt = new char[chunkSize]; while (instream.good()) { instream.read(ptxt, chunkSize); size_t nRead = instream.gcount(); if (nRead <= 0 && padded) break; BytePlaintextEncoding px(ptxt, nRead); if (nRead < chunkSize) { padded = true; } Poly pt(publicKey->GetCryptoParameters()->GetElementParams()); px.Encode(publicKey->GetCryptoParameters()->GetPlaintextModulus(), &pt, 0, chunkSize); shared_ptr<Ciphertext<Element>> ciphertext = GetEncryptionAlgorithm()->Encrypt(publicKey, pt, doEncryption); if (!ciphertext) { delete [] ptxt; return; } Serialized cS; if (ciphertext->Serialize(&cS)) { if (!SerializableHelper::SerializationToStream(cS, outstream)) { delete [] ptxt; return; } } else { delete [] ptxt; return; } } delete [] ptxt; return; } /** * Decrypt method for PALISADE * @param privateKey - for decryption * @param ciphertext - vector of encrypted ciphertext * @param plaintext - pointer to destination for the result of decryption * @param doPadding - true if input plaintext was padded; causes unpadding on last piece of ciphertext * @return size of plaintext */ DecryptResult Decrypt( const shared_ptr<LPPrivateKey<Element>> privateKey, const std::vector<shared_ptr<Ciphertext<Element>>>& ciphertext, Plaintext *plaintext, bool doPadding = true) const { // edge case if (ciphertext.size() == 0) return DecryptResult(); if( privateKey == NULL || privateKey->GetCryptoContext() != this ) throw std::logic_error("Information passed to Decrypt was not generated with this crypto context"); size_t lastone = ciphertext.size() - 1; double start = 0; if( doTiming ) start = currentDateTime(); for( size_t ch = 0; ch < ciphertext.size(); ch++ ) { if( ciphertext[ch] == NULL || ciphertext[ch]->GetCryptoContext() != this ) throw std::logic_error("A ciphertext passed to Decrypt was not generated with this crypto context"); Poly decrypted; DecryptResult result = GetEncryptionAlgorithm()->Decrypt(privateKey, ciphertext[ch], &decrypted); if (result.isValid == false) return result; plaintext->Decode(privateKey->GetCryptoParameters()->GetPlaintextModulus(), &decrypted); if (ch == lastone && doPadding) { plaintext->Unpad(privateKey->GetCryptoParameters()->GetPlaintextModulus()); } } if( doTiming ) { timeSamples->push_back( TimingInfo(OpDecrypt, currentDateTime() - start) ); } return DecryptResult(plaintext->GetLength()); } /** * Decrypt method for a matrix of ciphertexts (integer encoding) * @param privateKey - for decryption * @param ciphertext - matrix of encrypted ciphertexts * @param plaintext - pointer to the 
destination martrix of plaintexts * @return size of plaintext */ DecryptResult DecryptMatrix( const shared_ptr<LPPrivateKey<Element>> privateKey, const shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext, Matrix<IntPlaintextEncoding> *numerator, Matrix<IntPlaintextEncoding> *denominator) const { // edge case if ((ciphertext->GetCols()== 0) && (ciphertext->GetRows() == 0)) return DecryptResult(); if ((ciphertext->GetCols() != numerator->GetCols())|| (ciphertext->GetRows() != numerator->GetRows()) || (ciphertext->GetCols() != denominator->GetCols()) || (ciphertext->GetRows() != denominator->GetRows())) throw std::runtime_error("Ciphertext and plaintext matrices have different dimensions"); if (privateKey == NULL || privateKey->GetCryptoContext() != this) throw std::runtime_error("Information passed to DecryptMatrix was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); for (size_t row = 0; row < ciphertext->GetRows(); row++) { for (size_t col = 0; col < ciphertext->GetCols(); col++) { if ((*ciphertext)(row, col).GetCryptoContext() != this) throw std::runtime_error("A ciphertext passed to DecryptMatrix was not generated with this crypto context"); const shared_ptr<Ciphertext<Element>> ctN = (*ciphertext)(row, col).GetNumerator(); Poly decryptedNumerator; DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt(privateKey, ctN, &decryptedNumerator); if (resultN.isValid == false) return resultN; (*numerator)(row,col).Decode(privateKey->GetCryptoParameters()->GetPlaintextModulus(), &decryptedNumerator); Poly decryptedDenominator; if( (*ciphertext)(row,col).GetIntegerFlag() == true ) { decryptedDenominator = decryptedNumerator.CloneParametersOnly(); decryptedDenominator.SetValuesToZero(); decryptedDenominator.SetValAtIndex(0,1); } else { const shared_ptr<Ciphertext<Element>> ctD = (*ciphertext)(row, col).GetDenominator(); DecryptResult resultD = GetEncryptionAlgorithm()->Decrypt(privateKey, ctD, &decryptedDenominator); if (resultD.isValid == false) return resultD; } (*denominator)(row, col).Decode(privateKey->GetCryptoParameters()->GetPlaintextModulus(), &decryptedDenominator); } } if( doTiming ) { timeSamples->push_back( TimingInfo(OpDecryptMatrixPlain, currentDateTime() - start) ); } return DecryptResult((*numerator)(numerator->GetRows()-1,numerator->GetCols()-1).GetLength()); } /** * Decrypt method for a matrix of ciphertexts (packed encoding) * @param privateKey - for decryption * @param ciphertext - matrix of encrypted ciphertexts * @param plaintext - pointer to the destination martrix of plaintexts * @return size of plaintext */ DecryptResult DecryptMatrix( const shared_ptr<LPPrivateKey<Element>> privateKey, const shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext, Matrix<PackedIntPlaintextEncoding> *numerator, Matrix<PackedIntPlaintextEncoding> *denominator) const { // edge case if ((ciphertext->GetCols() == 0) && (ciphertext->GetRows() == 0)) return DecryptResult(); if ((ciphertext->GetCols() != numerator->GetCols()) || (ciphertext->GetRows() != numerator->GetRows()) || (ciphertext->GetCols() != denominator->GetCols()) || (ciphertext->GetRows() != denominator->GetRows())) throw std::runtime_error("Ciphertext and plaintext matrices have different dimensions"); if (privateKey == NULL || privateKey->GetCryptoContext() != this) throw std::runtime_error("Information passed to DecryptMatrix was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); for (size_t row = 0; row < 
ciphertext->GetRows(); row++) { for (size_t col = 0; col < ciphertext->GetCols(); col++) { if ((*ciphertext)(row, col).GetCryptoContext() != this) throw std::runtime_error("A ciphertext passed to DecryptMatrix was not generated with this crypto context"); const shared_ptr<Ciphertext<Element>> ctN = (*ciphertext)(row, col).GetNumerator(); Poly decryptedNumerator; DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt(privateKey, ctN, &decryptedNumerator); if (resultN.isValid == false) return resultN; (*numerator)(row, col).Decode(privateKey->GetCryptoParameters()->GetPlaintextModulus(), &decryptedNumerator); const shared_ptr<Ciphertext<Element>> ctD = (*ciphertext)(row, col).GetDenominator(); Poly decryptedDenominator; DecryptResult resultD = GetEncryptionAlgorithm()->Decrypt(privateKey, ctD, &decryptedDenominator); if (resultD.isValid == false) return resultD; (*denominator)(row, col).Decode(privateKey->GetCryptoParameters()->GetPlaintextModulus(), &decryptedDenominator); } } if( doTiming ) { timeSamples->push_back( TimingInfo(OpDecryptMatrixPacked, currentDateTime() - start) ); } return DecryptResult((*numerator)(numerator->GetRows() - 1, numerator->GetCols() - 1).GetLength()); } /** * Decrypt method for numerators in a matrix of ciphertexts (packed encoding) * @param privateKey - for decryption * @param ciphertext - matrix of encrypted ciphertexts * @param plaintext - pointer to the destination martrix of plaintexts * @return size of plaintext */ DecryptResult DecryptMatrixNumerator( const shared_ptr<LPPrivateKey<Element>> privateKey, const shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext, Matrix<PackedIntPlaintextEncoding> *numerator) const { // edge case if ((ciphertext->GetCols() == 0) && (ciphertext->GetRows() == 0)) return DecryptResult(); if ((ciphertext->GetCols() != numerator->GetCols()) || (ciphertext->GetRows() != numerator->GetRows())) throw std::runtime_error("Ciphertext and plaintext matrices have different dimensions"); if (privateKey == NULL || privateKey->GetCryptoContext() != this) throw std::runtime_error("Information passed to DecryptMatrix was not generated with this crypto context"); double start = 0; if (doTiming) start = currentDateTime(); //force all precomputations to take place in advance if ((*ciphertext)(0, 0).GetCryptoContext() != this) throw std::runtime_error("A ciphertext passed to DecryptMatrix was not generated with this crypto context"); const shared_ptr<Ciphertext<Element>> ctN = (*ciphertext)(0, 0).GetNumerator(); Poly decryptedNumerator; //DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt(privateKey, ctN, &decryptedNumerator); GetEncryptionAlgorithm()->Decrypt(privateKey, ctN, &decryptedNumerator); //if (resultN.isValid == false) return resultN; (*numerator)(0, 0).Decode(privateKey->GetCryptoParameters()->GetPlaintextModulus(), &decryptedNumerator); for (size_t row = 0; row < ciphertext->GetRows(); row++) { #pragma omp parallel for for (size_t col = 0; col < ciphertext->GetCols(); col++) { if (row + col > 0) { if ((*ciphertext)(row, col).GetCryptoContext() != this) throw std::runtime_error("A ciphertext passed to DecryptMatrix was not generated with this crypto context"); const shared_ptr<Ciphertext<Element>> ctN = (*ciphertext)(row, col).GetNumerator(); Poly decryptedNumerator; //DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt(privateKey, ctN, &decryptedNumerator); GetEncryptionAlgorithm()->Decrypt(privateKey, ctN, &decryptedNumerator); //if (resultN.isValid == false) return resultN; (*numerator)(row, 
col).Decode(privateKey->GetCryptoParameters()->GetPlaintextModulus(), &decryptedNumerator); } } } if (doTiming) { timeSamples->push_back(TimingInfo(OpDecryptMatrixPacked, currentDateTime() - start)); } return DecryptResult((*numerator)(numerator->GetRows() - 1, numerator->GetCols() - 1).GetLength()); } /** * read instream for a sequence of serialized ciphertext; deserialize it, decrypt it, and write it to outstream * @param privateKey - reference to the decryption key * @param instream - input stream with sequence of serialized ciphertexts * @param outstream - output stream for plaintext * @return */ void DecryptStream( const shared_ptr<LPPrivateKey<Element>> privateKey, std::istream& instream, std::ostream& outstream) { // NOTE timing this operation is not supported if( privateKey == NULL || privateKey->GetCryptoContext() != this ) throw std::logic_error("Information passed to DecryptStream was not generated with this crypto context"); Serialized serObj; size_t tot = 0; bool firstTime = true; BytePlaintextEncoding pte[2]; bool whichArray = false; while( SerializableHelper::StreamToSerialization(instream, &serObj) ) { shared_ptr<Ciphertext<Element>> ct; if( (ct = deserializeCiphertext(serObj)) != NULL ) { Poly decrypted; DecryptResult res = GetEncryptionAlgorithm()->Decrypt(privateKey, ct, &decrypted); if( !res.isValid ) return; tot += res.messageLength; pte[whichArray].Decode(privateKey->GetCryptoParameters()->GetPlaintextModulus(), &decrypted); if( !firstTime ) { outstream << pte[!whichArray]; pte[!whichArray].clear(); } firstTime = false; whichArray = !whichArray; } else return; } // unpad and write the last one pte[!whichArray].Unpad(privateKey->GetCryptoParameters()->GetPlaintextModulus()); outstream << pte[!whichArray]; return; } /** * ReEncrypt - Proxy Re Encryption mechanism for PALISADE * @param evalKey - evaluation key from the PRE keygen method * @param ciphertext - vector of shared pointers to encrypted Ciphertext * @return vector of shared pointers to re-encrypted ciphertexts */ std::vector<shared_ptr<Ciphertext<Element>>> ReEncrypt( shared_ptr<LPEvalKey<Element>> evalKey, std::vector<shared_ptr<Ciphertext<Element>>>& ciphertext) const { if( evalKey == NULL || evalKey->GetCryptoContext() != this ) throw std::logic_error("Information passed to ReEncrypt was not generated with this crypto context"); std::vector<shared_ptr<Ciphertext<Element>>> newCiphertext; double start = 0; if( doTiming ) start = currentDateTime(); for( size_t i=0; i < ciphertext.size(); i++ ) { if( ciphertext[i] == NULL || ciphertext[i]->GetCryptoContext() != this ) throw std::logic_error("One of the ciphertexts passed to ReEncrypt was not generated with this crypto context"); newCiphertext.push_back( GetEncryptionAlgorithm()->ReEncrypt(evalKey, ciphertext[i]) ); } if( doTiming ) { timeSamples->push_back( TimingInfo(OpReEncrypt, currentDateTime() - start) ); } return newCiphertext; } /** * read instream for a serialized ciphertext. 
deserialize, re-encrypt, serialize, and write to outstream * @param evalKey - reference to the re-encryption key * @param instream - input stream with sequence of serialized ciphertext * @param outstream - output stream with sequence of serialized re-encrypted ciphertext */ void ReEncryptStream( const shared_ptr<LPEvalKey<Element>> evalKey, std::istream& instream, std::ostream& outstream) { // NOTE timing this operation is not supported if( evalKey == NULL || evalKey->GetCryptoContext() != this ) throw std::logic_error("Information passed to ReEncryptStream was not generated with this crypto context"); Serialized serObj; while( SerializableHelper::StreamToSerialization(instream, &serObj) ) { shared_ptr<Ciphertext<Element>> ct; ct = deserializeCiphertext(serObj); if( ct ) { std::vector<shared_ptr<Ciphertext<Element>>> allCt; allCt.push_back(ct); std::vector<shared_ptr<Ciphertext<Element>>> reCt = ReEncrypt(evalKey, allCt); Serialized serReObj; if( reCt[0]->Serialize(&serReObj) ) { SerializableHelper::SerializationToStream(serReObj, outstream); } else { return; } allCt.clear(); } else { return; } } } /** * EvalAdd - PALISADE EvalAdd method for a pair of ciphertexts * @param ct1 * @param ct2 * @return new ciphertext for ct1 + ct2 */ shared_ptr<Ciphertext<Element>> EvalAdd(const shared_ptr<Ciphertext<Element>> ct1, const shared_ptr<Ciphertext<Element>> ct2) const { if( ct1 == NULL || ct2 == NULL || ct1->GetCryptoContext() != this || ct2->GetCryptoContext() != this ) throw std::logic_error("Information passed to EvalAdd was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalAdd(ct1, ct2); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalAdd, currentDateTime() - start) ); } return rv; } shared_ptr<Matrix<RationalCiphertext<Element>>> EvalAddMatrix(const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1, const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const { // tests needed for context double start = 0; if( doTiming ) start = currentDateTime(); Matrix<RationalCiphertext<Element>> rv = *ct1 + *ct2; if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalAddMatrix, currentDateTime() - start) ); } shared_ptr<Matrix<RationalCiphertext<Element>>> a(new Matrix<RationalCiphertext<Element>>(rv)); return a; } /** * EvalSub - PALISADE EvalSub method for a pair of ciphertexts * @param ct1 * @param ct2 * @return new ciphertext for ct1 - ct2 */ shared_ptr<Ciphertext<Element>> EvalSub(const shared_ptr<Ciphertext<Element>> ct1, const shared_ptr<Ciphertext<Element>> ct2) const { if( ct1 == NULL || ct2 == NULL || ct1->GetCryptoContext() != this || ct2->GetCryptoContext() != this ) throw std::logic_error("Information passed to EvalSub was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalSub(ct1, ct2); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalSub, currentDateTime() - start) ); } return rv; } shared_ptr<Matrix<RationalCiphertext<Element>>> EvalSubMatrix(const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1, const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const { // tests needed for context double start = 0; if( doTiming ) start = currentDateTime(); Matrix<RationalCiphertext<Element>> rv = *ct1 - *ct2; if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalSubMatrix, currentDateTime() - start) ); } shared_ptr<Matrix<RationalCiphertext<Element>>> a(new 
Matrix<RationalCiphertext<Element>>(rv)); return a; } /** * EvalAddPLain - PALISADE EvalAdd method for a ciphertext and plaintext * @param ciphertext * @param plaintext * @return new ciphertext for ciphertext + plaintext */ shared_ptr<Ciphertext<Element>> EvalAddPlain(const shared_ptr<Ciphertext<Element>> ciphertext, const shared_ptr<Ciphertext<Element>> plaintext) const { double start = 0; if( doTiming ) start = currentDateTime(); auto rv = EvalAdd(ciphertext, plaintext); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalAddPlain, currentDateTime() - start) ); } return rv; } /** * EvalSubPlain - PALISADE EvalSub method for a ciphertext and plaintext * @param ciphertext * @param plaintext * @return new ciphertext for ciphertext - plaintext */ shared_ptr<Ciphertext<Element>> EvalSubPlain(const shared_ptr<Ciphertext<Element>> ciphertext, const shared_ptr<Ciphertext<Element>> plaintext) const { double start = 0; if( doTiming ) start = currentDateTime(); auto rv = EvalSub(ciphertext, plaintext); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalSubPlain, currentDateTime() - start) ); } return rv; } /** * EvalMult - PALISADE EvalMult method for a pair of ciphertexts * @param ct1 * @param ct2 * @return new ciphertext for ct1 * ct2 */ shared_ptr<Ciphertext<Element>> EvalMult(const shared_ptr<Ciphertext<Element>> ct1, const shared_ptr<Ciphertext<Element>> ct2) const { if( ct1 == NULL || ct2 == NULL || ct1->GetCryptoContext() != this || ct2->GetCryptoContext() != this ) throw std::logic_error("Information passed to EvalMult was not generated with this crypto context"); auto ek = GetEvalMultKey(); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalMult(ct1, ct2, ek); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalMult, currentDateTime() - start) ); } return rv; } shared_ptr<Matrix<RationalCiphertext<Element>>> EvalMultMatrix(const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1, const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const { // tests needed for context double start = 0; if( doTiming ) start = currentDateTime(); Matrix<RationalCiphertext<Element>> rv = *ct1 * *ct2; if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalMultMatrix, currentDateTime() - start) ); } shared_ptr<Matrix<RationalCiphertext<Element>>> a(new Matrix<RationalCiphertext<Element>>(rv)); return a; } /** * EvalMult - PALISADE EvalMult method for a a multiplication of ciphertext by plaintext * @param ct1 * @param ct2 * @return new ciphertext for ct1 * ct2 */ shared_ptr<Ciphertext<Element>> EvalMultPlain(const shared_ptr<Ciphertext<Element>> ciphertext, const shared_ptr<Ciphertext<Element>> plaintext) const { if (ciphertext == NULL || plaintext == NULL || ciphertext->GetCryptoContext() != this || plaintext->GetCryptoContext() != this) throw std::logic_error("Information passed to EvalMult was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalMultPlain(ciphertext, plaintext); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalMultPlain, currentDateTime() - start) ); } return rv; } /** * EvalMult - PALISADE EvalMult method for a pair of ciphertexts, followed by recrypt with given key * @param ct1 * @param ct2 * @param ek * @return new ciphertext for ct1 * ct2, recrypted with ek */ shared_ptr<Ciphertext<Element>> EvalMult(const shared_ptr<Ciphertext<Element>> ct1, const shared_ptr<Ciphertext<Element>> ct2, const 
shared_ptr<LPEvalKey<Element>> ek) const { if( ct1 == NULL || ct2 == NULL || ek == NULL || ct1->GetCryptoContext() != this || ct2->GetCryptoContext() != this || ek->GetCryptoContext() != this ) throw std::logic_error("Information passed to EvalMult was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalMult(ct1, ct2, ek); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalMultKey, currentDateTime() - start) ); } return rv; } /** * EvalSub - PALISADE Negate method for a ciphertext * @param ct * @return new ciphertext -ct */ shared_ptr<Ciphertext<Element>> EvalNegate(const shared_ptr<Ciphertext<Element>> ct) const { if (ct == NULL || ct->GetCryptoContext() != this) throw std::logic_error("Information passed to EvalNegate was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalNegate(ct); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalNeg, currentDateTime() - start) ); } return rv; } /** * EvalSub - PALISADE Negate method for a ciphertext * @param ct * @return new ciphertext -ct */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalNegateMatrix(const shared_ptr<Matrix<RationalCiphertext<Element>>> ct) const { double start = 0; if( doTiming ) start = currentDateTime(); shared_ptr<Matrix<RationalCiphertext<Element>>> m( new Matrix<RationalCiphertext<Element>>(ct->GetAllocator(), ct->GetRows(), ct->GetCols())); for( size_t r = 0; r < m->GetRows(); r++ ) for( size_t c = 0; c < m->GetCols(); c++ ) (*m)(r,c) = -((*ct)(r,c)); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalNegMatrix, currentDateTime() - start) ); } return m; } /** * Generate automophism keys for a given private key * * @param publicKey original public key. * @param origPrivateKey original private key. * @param indexList list of automorphism indices to be computed * @return returns the evaluation keys; index 0 of the vector corresponds to plaintext index 2, index 1 to plaintex index 3, etc. */ shared_ptr<std::map<usint, shared_ptr<LPEvalKey<Element>>>> EvalAutomorphismKeyGen(const shared_ptr<LPPublicKey<Element>> publicKey, const shared_ptr<LPPrivateKey<Element>> origPrivateKey, const std::vector<usint> &indexList) const { //need to add exception handling double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalAutomorphismKeyGen(publicKey, origPrivateKey, indexList); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalAutomorphismKeyGen, currentDateTime() - start) ); } return rv; } /** * Function for evaluating automorphism of ciphertext at index i * * @param ciphertext the input ciphertext. * @param i automorphism index * @param &evalKeys - reference to the vector of evaluation keys generated by EvalAutomorphismKeyGen. * @return resulting ciphertext */ shared_ptr<Ciphertext<Element>> EvalAutomorphism(const shared_ptr<Ciphertext<Element>> ciphertext, usint i, const std::map<usint, shared_ptr<LPEvalKey<Element>>> &evalKeys) const { //need to add exception handling double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalAutomorphism(ciphertext, i, evalKeys); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalAutomorphismI, currentDateTime() - start) ); } return rv; } /** * Generate automophism keys for a given private key; Uses the private key for encryption * * @param privateKey private key. 
* @param indexList list of automorphism indices to be computed * @return returns the evaluation keys */ shared_ptr<std::map<usint, shared_ptr<LPEvalKey<Element>>>> EvalAutomorphismKeyGen(const shared_ptr<LPPrivateKey<Element>> privateKey, const std::vector<usint> &indexList) const { //need to add exception handling double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalAutomorphismKeyGen(privateKey, indexList); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalAutomorphismK, currentDateTime() - start) ); } return rv; } /** * EvalSumKeyGen Generates the key map to be used by evalsum * * @param privateKey private key. * @param publicKey public key (used in NTRU schemes). */ void EvalSumKeyGen( const shared_ptr<LPPrivateKey<Element>> privateKey, const shared_ptr<LPPublicKey<Element>> publicKey = nullptr); /** * GetEvalSumKey returns the map * * @return the EvalSum key map */ const std::map<usint, shared_ptr<LPEvalKey<Element>>>& GetEvalSumKey() const; /** * SetEvalSumKeys - used by deserializer to set the keys for EvalSum * FIXME should be private? * @param evalSumKeys - new key map */ void SetEvalSumKeys(std::map<usint, shared_ptr<LPEvalKey<Element>>>& evalSumKeys) { this->evalSumKeys.clear(); this->evalSumKeys = evalSumKeys; } /** * Function for evaluating a sum of all components * * @param ciphertext the input ciphertext. * @param batchSize size of the batch * @return resulting ciphertext */ shared_ptr<Ciphertext<Element>> EvalSum(const shared_ptr<Ciphertext<Element>> ciphertext, usint batchSize) const; /** * Evaluates inner product in batched encoding * * @param ciphertext1 first vector. * @param ciphertext2 second vector. * @param batchSize size of the batch to be summed up * @return resulting ciphertext */ shared_ptr<Ciphertext<Element>> EvalInnerProduct(const shared_ptr<Ciphertext<Element>> ciphertext1, const shared_ptr<Ciphertext<Element>> ciphertext2, usint batchSize) const; /** * EvalCrossCorrelation - Computes the sliding sum of inner products (known as * as cross-correlation, sliding inner product, or sliding dot product in * image processing * @param x - first vector of row vectors * @param y - second vector of row vectors * @param batchSize - batch size for packed encoding * @param indexStart - starting index in the vectors of row vectors * @param length - length of the slice in the vectors of row vectors; default is 0 meaning to use the full length of the vector * @return sum(x_i*y_i), i.e., a sum of inner products */ shared_ptr<Ciphertext<Element>> EvalCrossCorrelation(const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize, usint indexStart = 0, usint length = 0) const; /** * EvalLinRegressBatched- Computes the parameter vector for linear regression using the least squares method * Supported only in batched mode; currently works only for two regressors * @param x - matrix of regressors * @param y - vector of dependent variables * @return the parameter vector using (x^T x)^{-1} x^T y (using least squares method) */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalLinRegressBatched(const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize) const; /** * EvalLinRegression - Computes the parameter vector for linear regression using the least squares method * @param x - matrix of regressors * @param y - vector of dependent variables * @return the parameter vector using (x^T x)^{-1} 
x^T y (using least squares method) */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalLinRegression(const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y) const { //if (ct1 == NULL || ct2 == NULL || ct1->GetCryptoContext() != this || ct2->GetCryptoContext() != this) // throw std::logic_error("Information passed to EvalMult was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalLinRegression(x, y); if( doTiming ) { timeSamples->push_back( TimingInfo(OpLinRegression, currentDateTime() - start) ); } return rv; } /** * KeySwitch - PALISADE KeySwitch method * @param keySwitchHint - reference to KeySwitchHint * @param ciphertext - vector of ciphertext * @return new Ciphertext after applying key switch */ shared_ptr<Ciphertext<Element>> KeySwitch( const shared_ptr<LPEvalKey<Element>> keySwitchHint, const shared_ptr<Ciphertext<Element>> ciphertext) const { if( keySwitchHint == NULL || keySwitchHint->GetCryptoContext() != this ) throw std::logic_error("Key passed to KeySwitch was not generated with this crypto context"); if( ciphertext == NULL || ciphertext->GetCryptoContext() != this ) throw std::logic_error("Ciphertext passed to KeySwitch was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->KeySwitch(keySwitchHint, ciphertext); if( doTiming ) { timeSamples->push_back( TimingInfo(OpKeySwitch, currentDateTime() - start) ); } return rv; } /** * ModReduce - PALISADE ModReduce method * @param ciphertext - vector of ciphertext * @return vector of mod reduced ciphertext */ shared_ptr<Ciphertext<Element>> ModReduce(shared_ptr<Ciphertext<Element>> ciphertext) const { if( ciphertext == NULL || ciphertext->GetCryptoContext() != this ) throw std::logic_error("Information passed to ModReduce was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->ModReduce(ciphertext); if( doTiming ) { timeSamples->push_back( TimingInfo(OpModReduce, currentDateTime() - start) ); } return rv; } /** * ModReduce - PALISADE ModReduce method * @param ciphertext - vector of ciphertext * @return vector of mod reduced ciphertext */ RationalCiphertext<Element> ModReduceRational(RationalCiphertext<Element> ciphertext) const { double start = 0; if( doTiming ) start = currentDateTime(); shared_ptr<Ciphertext<Element>> n = GetEncryptionAlgorithm()->ModReduce(ciphertext.GetNumerator()); shared_ptr<Ciphertext<Element>> d = GetEncryptionAlgorithm()->ModReduce(ciphertext.GetDenominator()); if( doTiming ) { timeSamples->push_back( TimingInfo(OpModReduce, currentDateTime() - start) ); } return RationalCiphertext<Element>(n,d); } /** * ModReduce - PALISADE ModReduce method * @param ciphertext - vector of ciphertext * @return vector of mod reduced ciphertext */ shared_ptr<Matrix<RationalCiphertext<Element>>> ModReduceMatrix(shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext) const { // needs context check double start = 0; if( doTiming ) start = currentDateTime(); shared_ptr<Matrix<RationalCiphertext<Element>>> m( new Matrix<RationalCiphertext<Element>>(ciphertext->GetAllocator(), ciphertext->GetRows(), ciphertext->GetCols())); for( size_t r = 0; r < m->GetRows(); r++ ) for( size_t c = 0; c < m->GetCols(); c++ ) (*m)(r,c) = ModReduceRational((*ciphertext)(r,c)); if( doTiming ) { timeSamples->push_back( 
TimingInfo(OpModReduceMatrix, currentDateTime() - start) ); } return m; } /** * LevelReduce - PALISADE LevelReduce method * @param cipherText1 * @param linearKeySwitchHint * @return vector of level reduced ciphertext */ shared_ptr<Ciphertext<Element>> LevelReduce(const shared_ptr<Ciphertext<Element>> cipherText1, const shared_ptr<LPEvalKeyNTRU<Element>> linearKeySwitchHint) const { if( cipherText1 == NULL || linearKeySwitchHint == NULL || cipherText1->GetCryptoContext() != this || linearKeySwitchHint->GetCryptoContext() != this) { throw std::logic_error("Information passed to LevelReduce was not generated with this crypto context"); } double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->LevelReduce(cipherText1, linearKeySwitchHint); if( doTiming ) { timeSamples->push_back( TimingInfo(OpLevelReduce, currentDateTime() - start) ); } return rv; } /** * RingReduce - PALISADE RingReduce method * @param ciphertext - vector of ciphertext * @param keySwitchHint - the keySwitchHint from original private key to sparse private key * @return vector of ring-reduced ciphertexts */ std::vector<shared_ptr<Ciphertext<Element>>> RingReduce( std::vector<shared_ptr<Ciphertext<Element>>> ciphertext, const shared_ptr<LPEvalKey<Element>> keySwitchHint) const { if( keySwitchHint == NULL || keySwitchHint->GetCryptoContext() != this ) throw std::logic_error("Key passed to RingReduce was not generated with this crypto context"); std::vector<shared_ptr<Ciphertext<Element>>> newCiphertext(ciphertext.size()); double start = 0; if( doTiming ) start = currentDateTime(); for (size_t i = 0; i < ciphertext.size(); i++) { if( ciphertext[i] == NULL || ciphertext[i]->GetCryptoContext() != this ) throw std::logic_error("Ciphertext passed to RingReduce was not generated with this crypto context"); newCiphertext[i] = GetEncryptionAlgorithm()->RingReduce(ciphertext[i], keySwitchHint); } if( doTiming ) { timeSamples->push_back( TimingInfo(OpRingReduce, currentDateTime() - start) ); } return newCiphertext; } /** * ComposedEvalMult - PALISADE composed evalmult * @param ciphertext1 - vector for first cipher text * @param ciphertext2 - vector for second cipher text * @param quadKeySwitchHint - is the quadratic key switch hint from original private key to the quadratic key * return vector of resulting ciphertext */ shared_ptr<Ciphertext<Element>> ComposedEvalMult( const shared_ptr<Ciphertext<Element>> ciphertext1, const shared_ptr<Ciphertext<Element>> ciphertext2) const { if( ciphertext1 == NULL || ciphertext2 == NULL || ciphertext1->GetCryptoContext() != this || ciphertext2->GetCryptoContext() != this ) throw std::logic_error("Ciphertexts passed to ComposedEvalMult was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->ComposedEvalMult(ciphertext1, ciphertext2, GetEvalMultKey()); if( doTiming ) { timeSamples->push_back( TimingInfo(OpComposedEvalMult, currentDateTime() - start) ); } return rv; } /** * Deserialize into a Public Key * @param serObj * @return deserialized object */ shared_ptr<LPPublicKey<Element>> deserializePublicKey(const Serialized& serObj); /** * Deserialize into a Private Key * @param serObj * @return deserialized object */ shared_ptr<LPPrivateKey<Element>> deserializeSecretKey(const Serialized& serObj); /** * Deserialize into a Ciphertext * @param serObj * @return deserialized object */ shared_ptr<Ciphertext<Element>> deserializeCiphertext(const Serialized& serObj); /** * Deserialize into 
an Eval Key * @param serObj * @return deserialized object */ shared_ptr<LPEvalKey<Element>> deserializeEvalKey(const Serialized& serObj); }; /** * @brief CryptoObject * * A class to aid in referring to the crypto context that an object belongs to */ template<typename Element> class CryptoObject { protected: CryptoContext<Element> *context; public: CryptoObject(CryptoContext<Element> *cc = 0) : context(cc) {} virtual ~CryptoObject() {} CryptoContext<Element> *GetCryptoContext() const { return context; } const shared_ptr<LPCryptoParameters<Element>> GetCryptoParameters() const { return context->GetCryptoParameters(); } }; /** * @brief CryptoContextFactory * * A class that contains static methods to generate new crypto contexts from user parameters * */ template<typename Element> class CryptoContextFactory { public: /** * construct a PALISADE CryptoContext for the LTV Scheme * @param plaintextmodulus * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param depth * @return new context */ static shared_ptr<CryptoContext<Element>> genCryptoContextLTV(shared_ptr<typename Element::Params> params, const usint plaintextmodulus, usint relinWindow, float stDev, int depth = 1, int assuranceMeasure = 9, float securityLevel = 1.006); /** * construct a PALISADE CryptoContext for the LTV Scheme * @param encodingParams * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param depth * @return new context */ static shared_ptr<CryptoContext<Element>> genCryptoContextLTV(shared_ptr<typename Element::Params> params, shared_ptr<EncodingParams> encodingParams, usint relinWindow, float stDev, int depth = 1, int assuranceMeasure = 9, float securityLevel = 1.006); /** * construct a PALISADE CryptoContext for the LTV Scheme using the scheme's ParamsGen methods * @param plaintextModulus * @param securityLevel * @param numAdds * @param numMults * @param numKeyswitches * @return new context */ static CryptoContext<Element> genCryptoContextLTV( const usint plaintextModulus, float securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches); /** * construct a PALISADE CryptoContext for the LTV Scheme using the scheme's ParamsGen methods * @param encodingParams * @param securityLevel * @param numAdds * @param numMults * @param numKeyswitches * @return new context */ static CryptoContext<Element> genCryptoContextLTV( shared_ptr<EncodingParams> encodingParams, float securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches); /** * construct a PALISADE CryptoContext for the FV Scheme * @param plaintextmodulus * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param delta * @param mode * @param bigmodulus * @param bigrootofunity * @param depth * @param assuranceMeasure * @param securityLevel * @param bigmodulusarb * @param bigrootofunityarb * @return new context */ static shared_ptr<CryptoContext<Element>> genCryptoContextFV(shared_ptr<typename Element::Params> params, const usint plaintextmodulus, usint relinWindow, float stDev, const std::string& delta, MODE mode = RLWE, const std::string& bigmodulus = "0", const std::string& bigrootofunity = "0", int depth = 0, int assuranceMeasure = 0, float securityLevel = 0, const std::string& bigmodulusarb = "0", const std::string& bigrootofunityarb = "0"); /** * construct a PALISADE CryptoContext for the FV Scheme * @param encodingParams * 
@param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param delta * @param mode * @param bigmodulus * @param bigrootofunity * @param depth * @param assuranceMeasure * @param securityLevel * @param bigmodulusarb * @param bigrootofunityarb * @return new context */ static shared_ptr<CryptoContext<Element>> genCryptoContextFV(shared_ptr<typename Element::Params> params, shared_ptr<EncodingParams> encodingParams, usint relinWindow, float stDev, const std::string& delta, MODE mode = RLWE, const std::string& bigmodulus = "0", const std::string& bigrootofunity = "0", int depth = 0, int assuranceMeasure = 0, float securityLevel = 0, const std::string& bigmodulusarb = "0", const std::string& bigrootofunityarb = "0"); /** * construct a PALISADE CryptoContext for the FV Scheme using the scheme's ParamsGen methods * @param plaintextModulus * @param securityLevel * @param numAdds * @param numMults * @param numKeyswitches * @return new context */ static shared_ptr<CryptoContext<Element>> genCryptoContextFV( const usint plaintextModulus, float securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED); /** * construct a PALISADE CryptoContext for the FV Scheme using the scheme's ParamsGen methods * @param encodingParams * @param securityLevel * @param numAdds * @param numMults * @param numKeyswitches * @return new context */ static shared_ptr<CryptoContext<Element>> genCryptoContextFV( shared_ptr<EncodingParams> encodingParams, float securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED); /** * construct a PALISADE CryptoContext for the BV Scheme * @param plaintextmodulus * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param mode * @return new context */ static shared_ptr<CryptoContext<Element>> genCryptoContextBV(shared_ptr<typename Element::Params> params, const usint plaintextmodulus, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1); /** * construct a PALISADE CryptoContext for the BV Scheme * @param encodingParams * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param mode * @return new context */ static shared_ptr<CryptoContext<Element>> genCryptoContextBV(shared_ptr<typename Element::Params> params, shared_ptr<EncodingParams> encodingParams, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1); /** * construct a PALISADE CryptoContext for the BV Scheme * @param plaintextmodulus * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param mode * @return new context */ static shared_ptr<CryptoContext<Element>> genCryptoContextSHIELD(shared_ptr<typename Element::Params> params, const usint plaintextmodulus, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1); /** * construct a PALISADE CryptoContext for the BV Scheme * @param encodingParams * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param mode * @return new context */ static shared_ptr<CryptoContext<Element>> genCryptoContextSHIELD(shared_ptr<typename Element::Params> params, shared_ptr<EncodingParams> encodingParams, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1); /** * construct a PALISADE CryptoContext for the StehleSteinfeld Scheme * @param plaintextmodulus * @param ringdim * @param modulus * @param 
rootOfUnity * @param relinWindow * @param stDev * @param stDevStSt * @return new context */ static shared_ptr<CryptoContext<Element>> genCryptoContextStehleSteinfeld(shared_ptr<typename Element::Params> params, const usint plaintextmodulus, usint relinWindow, float stDev, float stDevStSt, int depth = 1, int assuranceMeasure = 9, float securityLevel = 1.006); /** * construct a PALISADE CryptoContext for the StehleSteinfeld Scheme * @param encodingParams * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param stDevStSt * @return new context */ static shared_ptr<CryptoContext<Element>> genCryptoContextStehleSteinfeld(shared_ptr<typename Element::Params> params, shared_ptr<EncodingParams> encodingParams, usint relinWindow, float stDev, float stDevStSt, int depth = 1, int assuranceMeasure = 9, float securityLevel = 1.006); /** * construct a PALISADE CryptoContext for the Null Scheme * @param modulus * @return */ static shared_ptr<CryptoContext<Element>> genCryptoContextNull(shared_ptr<typename Element::Params> ep, const usint ptModulus); /** * construct a PALISADE CryptoContext for the Null Scheme * @param modulus * @return */ static shared_ptr<CryptoContext<Element>> genCryptoContextNull(shared_ptr<typename Element::Params> ep, shared_ptr<EncodingParams> encodingParams); /** * Create a PALISADE CryptoContext from a serialization * @param serObj * @return new context */ static shared_ptr<CryptoContext<Element>> DeserializeAndCreateContext(const Serialized& serObj, bool noKeys = false); }; } #endif /* SRC_DEMO_PRE_CRYPTOCONTEXT_H_ */
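The CryptoContext class above is the user-facing API: every operation checks that its keys and ciphertexts were produced by this context, optionally records a TimingInfo sample, and forwards to GetEncryptionAlgorithm(). A minimal round-trip sketch follows, assuming a context obtained from one of the CryptoContextFactory generators above, that the context also exposes a KeyGen() returning an LPKeyPair (as the SparseKeyGen wrapper suggests), and that the key pair's members are named publicKey and secretKey; those assumptions and the sample data are illustrative rather than taken from this header.

// Illustrative round trip only; the names flagged above as assumptions are not
// guaranteed by this header and may differ between PALISADE versions.
template <typename Element>
void EncryptDecryptRoundTrip(shared_ptr<CryptoContext<Element>> cc)
{
    LPKeyPair<Element> kp = cc->KeyGen();   // assumed KeyGen() wrapper
    cc->EvalMultKeyGen(kp.secretKey);       // caches the key EvalMult needs

    // Encode and encrypt a short byte string, padding the last chunk.
    BytePlaintextEncoding plaintext("hello");
    std::vector<shared_ptr<Ciphertext<Element>>> ct =
        cc->Encrypt(kp.publicKey, plaintext, true /*doPadding*/);

    // Decrypt into a fresh plaintext object; the last chunk is unpadded.
    BytePlaintextEncoding decoded;
    DecryptResult res = cc->Decrypt(kp.secretKey, ct, &decoded, true /*doPadding*/);
    // res.isValid reports success; decoded should match the original bytes.
}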
hello.c
#include <stdio.h>

int main(int argc, char *argv[])
{
    printf("Hello world!\n");

#pragma omp parallel
    {
        printf("X\n");
    }

    return 0;
}
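The parallel region above executes its block once on every thread in the team, so "X" is printed once per thread. A small illustrative variant (not part of the original file; it relies only on the standard OpenMP runtime calls omp_get_thread_num and omp_get_num_threads) makes that visible by tagging each line with the thread number.

/* Sketch: same structure as hello.c, with the printing thread identified. */
#include <stdio.h>
#include <omp.h>

int main(void)
{
    printf("Hello world!\n");

#pragma omp parallel
    {
        /* Each member of the team executes this block exactly once. */
        printf("X from thread %d of %d\n",
               omp_get_thread_num(), omp_get_num_threads());
    }

    return 0;
}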
pr83977-1.c
/* PR middle-end/83977 */
/* { dg-do compile } */
/* { dg-additional-options "-O2" } */

struct S { int a, b, c; };

#pragma omp declare simd uniform(z) linear(v:1)
__attribute__((noinline)) static int
foo (int x, int y, struct S z, int u, int v)
{
  return x + y + z.a;
}

int
bar (int x, int y, int z)
{
  struct S s = { z, 1, 1 };
  return foo (x, y, s, 0, 0);
}
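The "declare simd" directive asks the compiler to emit SIMD clones of foo in addition to the scalar version; uniform(z) promises the struct argument is identical across lanes, and linear(v:1) promises v advances by one per loop iteration. A hedged sketch of a caller that satisfies both clauses follows; the loop, the array, and the function name are illustrative additions, not part of the GCC test, and since foo is static the caller would have to live in the same translation unit.

/* Illustrative caller only -- not part of the original test case. */
int
use_foo (int *out, int n, struct S z)
{
  int i;
#pragma omp simd
  for (i = 0; i < n; i++)
    out[i] = foo (i, 2 * i, z, 0, i);  /* z uniform across lanes, v = i linear, step 1 */
  return n;
}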
looper.h
// // Created by Lei Ma on 9/6/17. // #ifndef HALO_PARALLEL_LOOPER_H #define HALO_PARALLEL_LOOPER_H #include "initializer.h" #include "stepper.h" #include "helper.h" #include <omp.h> //For openmp // #include "recorder.h"// For test namespace Looper { void vacuum_euler_forward( state_type rho_self_array [], const double dt, const int length){ // loop through for iter iterations for(int i =0; i<length-1; i++){ rho_self_array[i+1] = Stepper::vacuum_euler_forward(rho_self_array[i], dt) ; } } void interaction_euler_forward( state_type rho_self_array [], state_type rho_counter_array [], const double dt, const int length){ // loop through for iter iterations // state_type rhs; for(int i =0; i<length-1; i++){ Stepper::euler_forward(rho_self_array[i+1], rho_self_array[i], rho_counter_array[length - 1 - i], dt) ; // rho_self_array[i+1] = rhs; } } void halo_euler_forward( StateArray rho_forward_array, StateArray rho_backward_array, const double dt, const int length){ // loop through for iter iterations state_type rhs; for(int i =0; i<length-1; i++){ Stepper::euler_forward(rhs, rho_forward_array[i], rho_backward_array[length - 1 - i], dt) ; rho_forward_array[i+1] = rhs; Stepper::euler_forward(rhs, rho_backward_array[i], rho_forward_array[length - 1 - i], dt) ; rho_backward_array[i+1] = rhs; } } void halo_euler_forward_one(StateArray* rho_array_ptr, StateArray* rho_array_store_ptr, const double dt, const int totallength, const double reflection = 1, const double muf = 5.0, const double costheta = -1.0){ // loop through for iter iterations int length = totallength/2; #pragma omp parallel for for(int i =0; i<length-1; i++){ Stepper::euler_forward_one( (*rho_array_ptr)[i+1], (*rho_array_store_ptr)[i], (*rho_array_store_ptr)[totallength - 2 - i], dt, reflection, muf, costheta) ; Stepper::euler_forward_one( (*rho_array_ptr)[length + i], (*rho_array_store_ptr)[length -1 + i], (*rho_array_store_ptr)[length -1 - i], dt, 1.0, muf, costheta) ; } } void halo_euler_forward_one_avg(StateArray* rho_array_ptr, StateArray* rho_array_store_ptr, const double dt, const int totallength, const double alpha, const double reflection = 1, const double muf = 5.0, const double costheta = -1.0){ // loop through for iter iterations int length = totallength/2; #pragma omp parallel for for(int i =0; i<length-1; i++){ Stepper::euler_forward_one( (*rho_array_ptr)[i+1], (*rho_array_store_ptr)[i], (*rho_array_store_ptr)[totallength - 2 - i], dt, reflection, muf, costheta) ; Stepper::euler_forward_one( (*rho_array_ptr)[length + i], (*rho_array_store_ptr)[length -1 + i], (*rho_array_store_ptr)[length -1 - i], dt, 1.0, muf, costheta) ; double sumrecpf = 0.0; double sumrecpb = 0.0; double elef = 0.0; double eleb = 0.0; // Average the new results with old results for(int j=0; j<3;j++){ elef = alpha * (*rho_array_store_ptr)[i][j] + (1 - alpha) * (*rho_array_ptr)[i][j]; (*rho_array_ptr)[i][j] = elef; eleb= alpha * (*rho_array_store_ptr)[length + i][j] + (1 - alpha) * (*rho_array_ptr)[length + i][j]; (*rho_array_ptr)[length + i][j] = eleb; sumrecpf = sumrecpf + elef*elef; sumrecpb = sumrecpb + eleb*eleb; } sumrecpf = 1/( std::sqrt(sumrecpf) ); sumrecpb = 1/( std::sqrt(sumrecpb) ); for(int j=0;j < 3;j++) { (*rho_array_ptr)[i][j] = (*rho_array_ptr)[i][j] * sumrecpf; (*rho_array_ptr)[length + i][j] = (*rho_array_ptr)[length + i][j] * sumrecpb; } } } void halo_euler_forward_one_incline(StateArray *rho_array_ptr, StateArray *rho_array_store_ptr, const double dt, const int totallength, const double alpha, const double reflection = 1, const double 
muf = 5.0, const double costheta = -1.0) { // loop through for iter iterations int length = totallength / 2; double alpha_rescaled = alpha/length; #pragma omp parallel for for (int i = 0; i < length - 1; i++) { // state_type hamilf; // state_type hamilb; // Stepper::euler_forward_one_w_h has been validated and compared to previous results. Stepper::euler_forward_one_incline(alpha_rescaled, (*rho_array_ptr)[i + 1], (*rho_array_store_ptr)[i], (*rho_array_store_ptr)[totallength - 2 - i], dt, reflection, muf, costheta); Stepper::euler_forward_one_incline(alpha_rescaled, (*rho_array_ptr)[length + i], (*rho_array_store_ptr)[length - 1 + i], (*rho_array_store_ptr)[length - 1 - i], dt, 1.0, muf, costheta); // int ipfsign = Helper::sgnf(innerproductf); // int ipbsign = Helper::sgnf(innerproductb); } } void halo_evolution_op_one(StateArray* rho_array_ptr, StateArray* rho_array_store_ptr, const double dt, const int totallength, const double reflection = 1.0, const double muf = 5.0, const double costheta = -1.0){ // loop through for iter iterations int length = totallength/2; #pragma omp parallel for for(int i =0; i<length-1; i++){ Stepper::evolution_op_one( (*rho_array_ptr)[i+1], (*rho_array_store_ptr)[i], (*rho_array_store_ptr)[totallength - 2 - i], dt, reflection, muf, costheta) ; Stepper::evolution_op_one( (*rho_array_ptr)[length + i], (*rho_array_store_ptr)[length -1 + i], (*rho_array_store_ptr)[length -1 - i], dt, 1.0, muf, costheta) ; } } void halo_euler_forward_one_nunubar(StateArray* rho_array_ptr, StateArray* rho_array_store_ptr, StateArray* rho_another_array_ptr, StateArray* rho_another_array_store_ptr, const double dt, const int totallength, const double spectrum[2], const double reflection, const double mu_arr[2], const double costheta[4]){ // loop through for iter iterations // spectrum[2] = {left beam, right beam} // mu_arr = {mu_left, mu_right} or {mu_1,mu_2} for the two beams // I define {mu_self, mu_the_other} when doing the calculations since the Hamiltonian takes in such parameters. 
// costheta[4] = { cos(2theta_left), cos(2theta_right) , cos(theta_right- theta_left), cos (theta_right+theta_left) } int length = totallength/2; // when calculating the right beam, the order of spectrum is reversed so I define the reversed spectrum double spectrum_r[2]; double mu_arr_r[2]; spectrum_r[0] = spectrum[1]; // spectrum_r is for the calculation of the right beam spectrum_r[1] = spectrum[0]; mu_arr_r[0] = mu_arr[1]; mu_arr_r[1] = mu_arr[0]; // define the costheta's needed for each beam double costheta_l[3]; double costheta_r[3]; costheta_l[0] = costheta[0]; costheta_l[1] = costheta[3]; costheta_l[2] = costheta[2]; costheta_r[0] = costheta[1]; costheta_r[1] = costheta[3]; costheta_r[2] = costheta[2]; // build the reflection array double refl_arr_f[2]; refl_arr_f[1] = reflection; refl_arr_f[0] = 1.0; double refl_arr_b[2]; refl_arr_b[1] = 1.0; refl_arr_b[0] = reflection; // interaction_nunubar( state_type &h_store, const state_type &rho_counter, const state_type &rho_ya_counter, const state_type &rho_same_direction, const double spectrum[2], const double reflection[2], const double muf[2], const double costheta[3] ) #pragma omp parallel for for(int i =0; i<length-1; i++){ // the left beam forward Stepper::euler_forward_one_nunubar( (*rho_array_ptr)[i+1], (*rho_array_store_ptr)[i], (*rho_array_store_ptr)[totallength - 2 - i], (*rho_another_array_store_ptr)[totallength - 2 - i], (*rho_another_array_store_ptr)[i], dt, spectrum, refl_arr_f, mu_arr, costheta_l) ; // right beam forward Stepper::euler_forward_one_nunubar( (*rho_another_array_ptr)[i+1], (*rho_another_array_store_ptr)[i], (*rho_another_array_store_ptr)[totallength - 2 - i], (*rho_array_store_ptr)[totallength - 2 - i], (*rho_array_store_ptr)[i], dt, spectrum_r, refl_arr_f, mu_arr_r, costheta_r) ; // left beam backward: left means it's the continuation of the original left beam, which is stored in the same array // Comment out to test bipolar model Stepper::euler_forward_one_nunubar( (*rho_array_ptr)[length + i], (*rho_array_store_ptr)[length -1 + i], (*rho_array_store_ptr)[length -1 - i], (*rho_another_array_store_ptr)[length - 1 - i], (*rho_another_array_store_ptr)[length-1+i], dt, spectrum, refl_arr_b, mu_arr, costheta_l); // right beam backward // Comment out to test bipolar model Stepper::euler_forward_one_nunubar( (*rho_another_array_ptr)[length + i], (*rho_another_array_store_ptr)[length -1 + i], (*rho_another_array_store_ptr)[length - 1 - i], (*rho_array_store_ptr)[length -1 - i], (*rho_array_store_ptr)[length-1+i], dt, spectrum_r, refl_arr_b, mu_arr_r, costheta_r); } } void halo_euler_forward_one_bipolar(StateArray* rho_array_ptr, StateArray* rho_array_store_ptr, StateArray* rho_another_array_ptr, StateArray* rho_another_array_store_ptr, const double dt, const int totallength, const double spectrum[2], const double reflection, const double mu_arr[2], const double costheta[4]){ // loop through for iter iterations // spectrum[2] = {left beam, right beam} // mu_arr = {mu_left, mu_right} or {mu_1,mu_2} for the two beams // I define {mu_self, mu_the_other} when doing the calculations since the Hamiltonian takes in such parameters. 
// costheta[4] = { cos(2theta_left), cos(2theta_right) , cos(theta_right- theta_left), cos (theta_right+theta_left) } int length = totallength/2; // when calculating the right beam, the order of spectrum is reversed so I define the reversed spectrum double spectrum_r[2]; double mu_arr_r[2]; spectrum_r[0] = spectrum[1]; // spectrum_r is for the calculation of the right beam spectrum_r[1] = spectrum[0]; mu_arr_r[0] = mu_arr[1]; mu_arr_r[1] = mu_arr[0]; // define the costheta's needed for each beam double costheta_l[3]; double costheta_r[3]; costheta_l[0] = costheta[0]; costheta_l[1] = costheta[3]; costheta_l[2] = costheta[2]; costheta_r[0] = costheta[1]; costheta_r[1] = costheta[3]; costheta_r[2] = costheta[2]; // build the reflection array double refl_arr_f[2]; refl_arr_f[1] = reflection; refl_arr_f[0] = 1.0; double refl_arr_b[2]; refl_arr_b[1] = 1.0; refl_arr_b[0] = reflection; // interaction_nunubar( state_type &h_store, const state_type &rho_counter, const state_type &rho_ya_counter, const state_type &rho_same_direction, const double spectrum[2], const double reflection[2], const double muf[2], const double costheta[3] ) #pragma omp parallel for for(int i =0; i<length-1; i++){ // the left beam forward Stepper::euler_forward_one_nunubar( (*rho_array_ptr)[i+1], (*rho_array_store_ptr)[i], (*rho_array_store_ptr)[totallength-2-i], (*rho_another_array_store_ptr)[totallength - 2 - i], (*rho_another_array_store_ptr)[i], dt, spectrum, refl_arr_f, mu_arr, costheta_l) ; // right beam forward Stepper::euler_forward_one_nunubar( (*rho_another_array_ptr)[i+1], (*rho_another_array_store_ptr)[i], (*rho_another_array_store_ptr)[totallength-2-i], (*rho_array_store_ptr)[totallength - 2 - i], (*rho_array_store_ptr)[i], dt, spectrum_r, refl_arr_f, mu_arr_r, costheta_r) ; } } } #endif //HALO_PARALLEL_LOOPER_H
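The Looper routines above all follow the same pattern: the *_store buffer holds the previous iterate, the other buffer receives the update computed inside the OpenMP loop, and the forward and backward beams occupy the two halves of each array. A minimal driver sketch for halo_euler_forward_one follows; it assumes only that StateArray (declared in initializer.h, which is not shown here) is an indexable container of state_type, and the function name and sweep count are illustrative.

#include <utility>  // std::swap

// Illustrative driver, not part of the original header: Jacobi-style sweeps
// that ping-pong between two StateArray buffers, as the signatures above expect.
void relax_halo(StateArray &a, StateArray &b, const int totallength,
                const double dt, const int sweeps) {
    StateArray *next  = &a;   // receives the updated beams
    StateArray *store = &b;   // holds the previous iterate that the step reads
    for (int s = 0; s < sweeps; s++) {
        Looper::halo_euler_forward_one(next, store, dt, totallength);
        std::swap(next, store);  // the new iterate becomes the input of the next sweep
    }
}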
GB_unaryop__ainv_uint16_uint32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_uint16_uint32 // op(A') function: GB_tran__ainv_uint16_uint32 // C type: uint16_t // A type: uint32_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = -aij #define GB_ATYPE \ uint32_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, aij) \ uint16_t z = (uint16_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_UINT16 || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_uint16_uint32 ( uint16_t *Cx, // Cx and Ax may be aliased uint32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_uint16_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
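/* Illustrative sketch only (not part of the auto-generated file above): this is
   roughly what GB_unop__ainv_uint16_uint32 computes once the GB_GETA, GB_CASTING,
   and GB_OP macros are expanded -- typecast uint32_t -> uint16_t first, then the
   AINV (additive inverse) operator, which wraps modulo 2^16 for unsigned types.
   The function name below is made up; only the loop body mirrors the macros. */
#include <stdint.h>
#include <stddef.h>

static void ainv_uint16_uint32_sketch (const uint32_t *Ax, uint16_t *Cx, size_t anz)
{
    for (size_t p = 0 ; p < anz ; p++)
    {
        uint16_t z = (uint16_t) Ax [p] ;    /* GB_GETA + GB_CASTING: cast aij to the C type */
        Cx [p] = (uint16_t) (-z) ;          /* GB_OP: cij = -aij */
    }
}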
omp_kmeans.c
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <omp.h>
#include "kmeans.h"

/* squared Euclidean distance */
__inline static
float euclid_dist_2(int    numdims,  /* no. dimensions */
                    float *coord1,   /* [numdims] */
                    float *coord2)   /* [numdims] */
{
    int   i;
    float ans = 0.0;

    for (i = 0; i < numdims; i++)
        ans += (coord1[i] - coord2[i]) * (coord1[i] - coord2[i]);

    return (ans);
}

/* find the nearest cluster */
__inline static
int find_nearest_cluster(int     numClusters, /* no. clusters */
                         int     numCoords,   /* no. coordinates */
                         float  *object,      /* [numCoords] */
                         float **clusters)    /* [numClusters][numCoords] */
{
    int   index, i;
    float dist, min_dist;

    /* find the id of the cluster closest to the object */
    index    = 0;
    min_dist = euclid_dist_2(numCoords, object, clusters[0]);

    for (i = 1; i < numClusters; i++) {
        dist = euclid_dist_2(numCoords, object, clusters[i]);
        if (dist < min_dist) {
            min_dist = dist;
            index    = i;
        }
    }
    return (index);
}

/* returns an array of cluster centers of size [numClusters][numCoords] */
float** omp_kmeans(int     is_perform_atomic,
                   float **objects,     /* input: [numObjs][numCoords] */
                   int     numCoords,   /* no. coordinates */
                   int     numObjs,     /* no. objects */
                   int     numClusters, /* no. clusters */
                   float   threshold,
                   int    *membership)  /* output: [numObjs] */
{
    int      i, j, k, index, loop = 0;
    int     *newClusterSize; /* [numClusters]: no. objects assigned in each new cluster */
    float    delta;          /* % of objects that change their clusters */
    float  **clusters;       /* out: [numClusters][numCoords] */
    float  **newClusters;    /* [numClusters][numCoords] */
    double   timing;

    int      nthreads;             /* no. threads */
    int    **local_newClusterSize; /* [nthreads][numClusters] */
    float ***local_newClusters;    /* [nthreads][numClusters][numCoords] */

    nthreads = omp_get_max_threads();

    /* allocate a 2D space for the returned variable clusters[]
       (coordinates of cluster centers) */
    clusters = (float**) malloc(numClusters * sizeof(float*));
    assert(clusters != NULL);
    clusters[0] = (float*) malloc(numClusters * numCoords * sizeof(float));
    assert(clusters[0] != NULL);
    for (i = 1; i < numClusters; i++)
        clusters[i] = clusters[i-1] + numCoords;

    /* pick the first numClusters elements of objects[] as initial cluster centers */
    for (i = 0; i < numClusters; i++)
        for (j = 0; j < numCoords; j++)
            clusters[i][j] = objects[i][j];

    /* initialize membership[] */
    for (i = 0; i < numObjs; i++) membership[i] = -1;

    /* need to initialize newClusterSize and newClusters[0] to all 0 */
    newClusterSize = (int*) calloc(numClusters, sizeof(int));
    assert(newClusterSize != NULL);

    newClusters = (float**) malloc(numClusters * sizeof(float*));
    assert(newClusters != NULL);
    newClusters[0] = (float*) calloc(numClusters * numCoords, sizeof(float));
    assert(newClusters[0] != NULL);
    for (i = 1; i < numClusters; i++)
        newClusters[i] = newClusters[i-1] + numCoords;

    if (!is_perform_atomic) {
        /* each thread calculates new centers in a private space, then thread 0
           does an array reduction on them.  This approach should be faster. */
        local_newClusterSize = (int**) malloc(nthreads * sizeof(int*));
        assert(local_newClusterSize != NULL);
        local_newClusterSize[0] = (int*) calloc(nthreads * numClusters, sizeof(int));
        assert(local_newClusterSize[0] != NULL);
        for (i = 1; i < nthreads; i++)
            local_newClusterSize[i] = local_newClusterSize[i-1] + numClusters;

        /* local_newClusters is a 3D array */
        local_newClusters = (float***) malloc(nthreads * sizeof(float**));
        assert(local_newClusters != NULL);
        local_newClusters[0] = (float**) malloc(nthreads * numClusters * sizeof(float*));
        assert(local_newClusters[0] != NULL);
        for (i = 1; i < nthreads; i++)
            local_newClusters[i] = local_newClusters[i-1] + numClusters;
        for (i = 0; i < nthreads; i++) {
            for (j = 0; j < numClusters; j++) {
                local_newClusters[i][j] = (float*) calloc(numCoords, sizeof(float));
                assert(local_newClusters[i][j] != NULL);
            }
        }
    }

    if (_debug) timing = omp_get_wtime();
    do {
        delta = 0.0;

        if (is_perform_atomic) {
            #pragma omp parallel for \
                    private(i,j,index) \
                    firstprivate(numObjs,numClusters,numCoords) \
                    shared(objects,clusters,membership,newClusters,newClusterSize) \
                    schedule(static) \
                    reduction(+:delta)
            for (i = 0; i < numObjs; i++) {
                /* find the index of the nearest cluster */
                index = find_nearest_cluster(numClusters, numCoords, objects[i],
                                             clusters);

                /* if membership changes, increase delta by 1 */
                if (membership[i] != index) delta += 1.0;

                /* assign the cluster id to object i */
                membership[i] = index;

                /* update new cluster centers: sum of objects located within the same cluster */
                #pragma omp atomic
                newClusterSize[index]++;
                for (j = 0; j < numCoords; j++)
                    #pragma omp atomic
                    newClusters[index][j] += objects[i][j];
            }
        }
        else {
            #pragma omp parallel \
                    shared(objects,clusters,membership,local_newClusters,local_newClusterSize)
            {
                int tid = omp_get_thread_num();
                #pragma omp for \
                        private(i,j,index) \
                        firstprivate(numObjs,numClusters,numCoords) \
                        schedule(static) \
                        reduction(+:delta)
                for (i = 0; i < numObjs; i++) {
                    /* find the index of the nearest cluster */
                    index = find_nearest_cluster(numClusters, numCoords,
                                                 objects[i], clusters);

                    /* if membership changes, increase delta by 1 */
                    if (membership[i] != index) delta += 1.0;

                    /* assign the cluster id to object i */
                    membership[i] = index;

                    /* update new cluster centers: sum of objects located within the same cluster */
                    local_newClusterSize[tid][index]++;
                    for (j = 0; j < numCoords; j++)
                        local_newClusters[tid][index][j] += objects[i][j];
                }
            } /* end of #pragma omp parallel */

            /* let the main thread perform the array reduction */
            for (i = 0; i < numClusters; i++) {
                for (j = 0; j < nthreads; j++) {
                    newClusterSize[i] += local_newClusterSize[j][i];
                    local_newClusterSize[j][i] = 0;
                    for (k = 0; k < numCoords; k++) {
                        newClusters[i][k] += local_newClusters[j][i][k];
                        local_newClusters[j][i][k] = 0.0;
                    }
                }
            }
        }

        /* average the sums and replace the old cluster centers with the new ones */
        for (i = 0; i < numClusters; i++) {
            for (j = 0; j < numCoords; j++) {
                if (newClusterSize[i] > 1)
                    clusters[i][j] = newClusters[i][j] / newClusterSize[i];
                newClusters[i][j] = 0.0;
            }
            newClusterSize[i] = 0;
        }

        delta /= numObjs;
    } while (delta > threshold && loop++ < 500);

    if (_debug) {
        timing = omp_get_wtime() - timing;
        printf("nloops = %2d (T = %7.4f)", loop, timing);
    }

    if (!is_perform_atomic) {
        free(local_newClusterSize[0]);
        free(local_newClusterSize);

        for (i = 0; i < nthreads; i++)
            for (j = 0; j < numClusters; j++)
                free(local_newClusters[i][j]);
        free(local_newClusters[0]);
        free(local_newClusters);
    }
    free(newClusters[0]);
    free(newClusters);
    free(newClusterSize);

    return clusters;
}
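/* A minimal, hypothetical driver (not part of omp_kmeans.c): it shows how the two
   update strategies are selected through is_perform_atomic (0 = per-thread private
   arrays merged by the main thread, 1 = "#pragma omp atomic" updates) and how the
   returned clusters array is laid out: one contiguous block addressed through row
   pointers, so only clusters[0] and clusters themselves need to be freed.  The
   sample data are made up, and _debug is assumed to be defined by the package's
   main program. */
static void example_omp_kmeans_call(void)
{
    int    numObjs = 4, numCoords = 2, numClusters = 2;
    float  data[4][2] = { {0.0f, 0.0f}, {0.0f, 1.0f}, {10.0f, 10.0f}, {10.0f, 11.0f} };

    /* build the [numObjs][numCoords] row-pointer view expected by omp_kmeans() */
    float **objects    = (float**) malloc(numObjs * sizeof(float*));
    int    *membership = (int*)    malloc(numObjs * sizeof(int));
    assert(objects != NULL && membership != NULL);
    for (int i = 0; i < numObjs; i++) objects[i] = data[i];

    float **clusters = omp_kmeans(0, objects, numCoords, numObjs, numClusters,
                                  0.001f, membership);

    for (int i = 0; i < numObjs; i++)
        printf("object %d -> cluster %d\n", i, membership[i]);

    free(clusters[0]);
    free(clusters);
    free(objects);
    free(membership);
}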
quantize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE % % Q Q U U A A NN N T I ZZ E % % Q Q U U AAAAA N N N T I ZZZ EEEEE % % Q QQ U U A A N NN T I ZZ E % % QQQQ UUU A A N N T IIIII ZZZZZ EEEEE % % % % % % MagickCore Methods to Reduce the Number of Unique Colors in an Image % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Realism in computer graphics typically requires using 24 bits/pixel to % generate an image. Yet many graphic display devices do not contain the % amount of memory necessary to match the spatial and color resolution of % the human eye. The Quantize methods takes a 24 bit image and reduces % the number of colors so it can be displayed on raster device with less % bits per pixel. In most instances, the quantized image closely % resembles the original reference image. % % A reduction of colors in an image is also desirable for image % transmission and real-time animation. % % QuantizeImage() takes a standard RGB or monochrome images and quantizes % them down to some fixed number of colors. % % For purposes of color allocation, an image is a set of n pixels, where % each pixel is a point in RGB space. RGB space is a 3-dimensional % vector space, and each pixel, Pi, is defined by an ordered triple of % red, green, and blue coordinates, (Ri, Gi, Bi). % % Each primary color component (red, green, or blue) represents an % intensity which varies linearly from 0 to a maximum value, Cmax, which % corresponds to full saturation of that color. Color allocation is % defined over a domain consisting of the cube in RGB space with opposite % vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax = % 255. % % The algorithm maps this domain onto a tree in which each node % represents a cube within that domain. In the following discussion % these cubes are defined by the coordinate of two opposite vertices (vertex % nearest the origin in RGB space and the vertex farthest from the origin). % % The tree's root node represents the entire domain, (0,0,0) through % (Cmax,Cmax,Cmax). Each lower level in the tree is generated by % subdividing one node's cube into eight smaller cubes of equal size. % This corresponds to bisecting the parent cube with planes passing % through the midpoints of each edge. % % The basic algorithm operates in three phases: Classification, % Reduction, and Assignment. Classification builds a color description % tree for the image. Reduction collapses the tree until the number it % represents, at most, the number of colors desired in the output image. % Assignment defines the output image's color map and sets each pixel's % color by restorage_class in the reduced tree. 
Our goal is to minimize % the numerical discrepancies between the original colors and quantized % colors (quantization error). % % Classification begins by initializing a color description tree of % sufficient depth to represent each possible input color in a leaf. % However, it is impractical to generate a fully-formed color description % tree in the storage_class phase for realistic values of Cmax. If % colors components in the input image are quantized to k-bit precision, % so that Cmax= 2k-1, the tree would need k levels below the root node to % allow representing each possible input color in a leaf. This becomes % prohibitive because the tree's total number of nodes is 1 + % sum(i=1, k, 8k). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. % Therefore, to avoid building a fully populated tree, QUANTIZE: (1) % Initializes data structures for nodes only as they are needed; (2) % Chooses a maximum depth for the tree as a function of the desired % number of colors in the output image (currently log2(colormap size)). % % For each pixel in the input image, storage_class scans downward from % the root of the color description tree. At each level of the tree it % identifies the single node which represents a cube in RGB space % containing the pixel's color. It updates the following data for each % such node: % % n1: Number of pixels whose color is contained in the RGB cube which % this node represents; % % n2: Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb: Sums of the red, green, and blue component values for all % pixels not classified at a lower depth. The combination of these sums % and n2 will ultimately characterize the mean color of a set of pixels % represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the nodes' center. This represents the % quantization error for a node. % % Reduction repeatedly prunes the tree until the number of nodes with n2 % > 0 is less than or equal to the maximum number of colors allowed in % the output image. On any given iteration over the tree, it selects % those nodes whose E count is minimal for pruning and merges their color % statistics upward. It uses a pruning threshold, Ep, to govern node % selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors within % the cubic volume which the node represents. 
This includes n1 - n2 % pixels whose colors should be defined by nodes at a lower level in the % tree. % % Assignment generates the output image from the pruned tree. The output % image consists of two parts: (1) A color map, which is an array of % color descriptions (RGB triples) for each color present in the output % image; (2) A pixel array, which represents each pixel as an index % into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. % % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % This method is based on a similar algorithm written by Paul Raveling. % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/histogram.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" /* Define declarations. */ #if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE) #define CacheShift 2 #else #define CacheShift 3 #endif #define ErrorQueueLength 16 #define MaxNodes 266817 #define MaxTreeDepth 8 #define NodesInAList 1920 /* Typdef declarations. */ typedef struct _DoublePixelPacket { double red, green, blue, alpha; } DoublePixelPacket; typedef struct _NodeInfo { struct _NodeInfo *parent, *child[16]; MagickSizeType number_unique; DoublePixelPacket total_color; double quantize_error; size_t color_number, id, level; } NodeInfo; typedef struct _Nodes { NodeInfo *nodes; struct _Nodes *next; } Nodes; typedef struct _CubeInfo { NodeInfo *root; size_t colors, maximum_colors; ssize_t transparent_index; MagickSizeType transparent_pixels; DoublePixelPacket target; double distance, pruning_threshold, next_threshold; size_t nodes, free_nodes, color_number; NodeInfo *next_node; Nodes *node_queue; MemoryInfo *memory_info; ssize_t *cache; DoublePixelPacket error[ErrorQueueLength]; double weights[ErrorQueueLength]; QuantizeInfo *quantize_info; MagickBooleanType associate_alpha; ssize_t x, y; size_t depth; MagickOffsetType offset; MagickSizeType span; } CubeInfo; /* Method prototypes. 
*/ static CubeInfo *GetCubeInfo(const QuantizeInfo *,const size_t,const size_t); static NodeInfo *GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *); static MagickBooleanType AssignImageColors(Image *,CubeInfo *,ExceptionInfo *), ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *), DitherImage(Image *,CubeInfo *,ExceptionInfo *), SetGrayscaleImage(Image *,ExceptionInfo *); static size_t DefineImageColormap(Image *,CubeInfo *,NodeInfo *); static void ClosestColor(const Image *,CubeInfo *,const NodeInfo *), DestroyCubeInfo(CubeInfo *), PruneLevel(CubeInfo *,const NodeInfo *), PruneToCubeDepth(CubeInfo *,const NodeInfo *), ReduceImageColors(const Image *,CubeInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireQuantizeInfo() allocates the QuantizeInfo structure. % % The format of the AcquireQuantizeInfo method is: % % QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info) { QuantizeInfo *quantize_info; quantize_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*quantize_info)); GetQuantizeInfo(quantize_info); if (image_info != (ImageInfo *) NULL) { const char *option; quantize_info->dither_method=image_info->dither == MagickFalse ? NoDitherMethod : RiemersmaDitherMethod; option=GetImageOption(image_info,"dither"); if (option != (const char *) NULL) quantize_info->dither_method=(DitherMethod) ParseCommandOption( MagickDitherOptions,MagickFalse,option); quantize_info->measure_error=image_info->verbose; } return(quantize_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A s s i g n I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AssignImageColors() generates the output image from the pruned tree. The % output image consists of two parts: (1) A color map, which is an array % of color descriptions (RGB triples) for each color present in the % output image; (2) A pixel array, which represents each pixel as an % index into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. % % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % The format of the AssignImageColors() method is: % % MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. 
% */ static inline void AssociateAlphaPixel(const Image *image, const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel) { double alpha; if ((cube_info->associate_alpha == MagickFalse) || (GetPixelAlpha(image,pixel) == OpaqueAlpha)) { alpha_pixel->red=(double) GetPixelRed(image,pixel); alpha_pixel->green=(double) GetPixelGreen(image,pixel); alpha_pixel->blue=(double) GetPixelBlue(image,pixel); alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel); return; } alpha=(double) (QuantumScale*GetPixelAlpha(image,pixel)); alpha_pixel->red=alpha*GetPixelRed(image,pixel); alpha_pixel->green=alpha*GetPixelGreen(image,pixel); alpha_pixel->blue=alpha*GetPixelBlue(image,pixel); alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel); } static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info, const PixelInfo *pixel,DoublePixelPacket *alpha_pixel) { double alpha; if ((cube_info->associate_alpha == MagickFalse) || (pixel->alpha == OpaqueAlpha)) { alpha_pixel->red=(double) pixel->red; alpha_pixel->green=(double) pixel->green; alpha_pixel->blue=(double) pixel->blue; alpha_pixel->alpha=(double) pixel->alpha; return; } alpha=(double) (QuantumScale*pixel->alpha); alpha_pixel->red=alpha*pixel->red; alpha_pixel->green=alpha*pixel->green; alpha_pixel->blue=alpha*pixel->blue; alpha_pixel->alpha=(double) pixel->alpha; } static inline size_t ColorToNodeId(const CubeInfo *cube_info, const DoublePixelPacket *pixel,size_t index) { size_t id; id=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) & 0x01) | ((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) & 0x01) << 1 | ((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) & 0x01) << 2); if (cube_info->associate_alpha != MagickFalse) id|=((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >> index) & 0x1) << 3; return(id); } static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) { #define AssignImageTag "Assign/Image" ColorspaceType colorspace; ssize_t y; /* Allocate image colormap. */ colorspace=image->colorspace; if (cube_info->quantize_info->colorspace != UndefinedColorspace) (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace, exception); if (AcquireImageColormap(image,cube_info->colors,exception) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); image->colors=0; cube_info->transparent_pixels=0; cube_info->transparent_index=(-1); (void) DefineImageColormap(image,cube_info,cube_info->root); /* Create a reduced color image. */ if (cube_info->quantize_info->dither_method != NoDitherMethod) (void) DitherImage(image,cube_info,exception); else { CacheView *image_view; MagickBooleanType status; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { CubeInfo cube; register Quantum *magick_restrict q; register ssize_t x; ssize_t count; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } cube=(*cube_info); for (x=0; x < (ssize_t) image->columns; x+=count) { DoublePixelPacket pixel; register const NodeInfo *node_info; register ssize_t i; size_t id, index; /* Identify the deepest node containing the pixel's color. 
*/ for (count=1; (x+count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,q,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,&cube,q,&pixel); node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. */ cube.target=pixel; cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+ 1.0); ClosestColor(image,&cube,node_info->parent); index=cube.color_number; for (i=0; i < (ssize_t) count; i++) { if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum( image->colormap[index].red),q); SetPixelGreen(image,ClampToQuantum( image->colormap[index].green),q); SetPixelBlue(image,ClampToQuantum( image->colormap[index].blue),q); if (cube.associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum( image->colormap[index].alpha),q); } q+=GetPixelChannels(image); } } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); } if (cube_info->quantize_info->measure_error != MagickFalse) (void) GetImageQuantizeError(image,exception); if ((cube_info->quantize_info->number_colors == 2) && ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) || (cube_info->quantize_info->colorspace == GRAYColorspace))) { double intensity; /* Monochrome image. */ intensity=0.0; if ((image->colors > 1) && (GetPixelInfoLuma(image->colormap+0) > GetPixelInfoLuma(image->colormap+1))) intensity=(double) QuantumRange; image->colormap[0].red=intensity; image->colormap[0].green=intensity; image->colormap[0].blue=intensity; if (image->colors > 1) { image->colormap[1].red=(double) QuantumRange-intensity; image->colormap[1].green=(double) QuantumRange-intensity; image->colormap[1].blue=(double) QuantumRange-intensity; } } (void) SyncImage(image,exception); if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (IssRGBCompatibleColorspace(colorspace) == MagickFalse)) (void) TransformImageColorspace(image,colorspace,exception); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l a s s i f y I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClassifyImageColors() begins by initializing a color description tree % of sufficient depth to represent each possible input color in a leaf. % However, it is impractical to generate a fully-formed color % description tree in the storage_class phase for realistic values of % Cmax. If colors components in the input image are quantized to k-bit % precision, so that Cmax= 2k-1, the tree would need k levels below the % root node to allow representing each possible input color in a leaf. % This becomes prohibitive because the tree's total number of nodes is % 1 + sum(i=1,k,8k). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. 
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1) % Initializes data structures for nodes only as they are needed; (2) % Chooses a maximum depth for the tree as a function of the desired % number of colors in the output image (currently log2(colormap size)). % % For each pixel in the input image, storage_class scans downward from % the root of the color description tree. At each level of the tree it % identifies the single node which represents a cube in RGB space % containing It updates the following data for each such node: % % n1 : Number of pixels whose color is contained in the RGB cube % which this node represents; % % n2 : Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb : Sums of the red, green, and blue component values for % all pixels not classified at a lower depth. The combination of % these sums and n2 will ultimately characterize the mean color of a % set of pixels represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the nodes' center. This represents the quantization % error for a node. % % The format of the ClassifyImageColors() method is: % % MagickBooleanType ClassifyImageColors(CubeInfo *cube_info, % const Image *image,ExceptionInfo *exception) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o image: the image. % */ static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info) { MagickBooleanType associate_alpha; associate_alpha=image->alpha_trait == BlendPixelTrait ? MagickTrue : MagickFalse; if ((cube_info->quantize_info->number_colors == 2) && ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) || (cube_info->quantize_info->colorspace == GRAYColorspace))) associate_alpha=MagickFalse; cube_info->associate_alpha=associate_alpha; } static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info, const Image *image,ExceptionInfo *exception) { #define ClassifyImageTag "Classify/Image" CacheView *image_view; DoublePixelPacket error, mid, midpoint, pixel; MagickBooleanType proceed; double bisect; NodeInfo *node_info; size_t count, id, index, level; ssize_t y; /* Classify the first cube_info->maximum_colors colors to a tree depth of 8. */ SetAssociatedAlpha(image,cube_info); if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace((Image *) image, cube_info->quantize_info->colorspace,exception); else if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception); midpoint.red=(double) QuantumRange/2.0; midpoint.green=(double) QuantumRange/2.0; midpoint.blue=(double) QuantumRange/2.0; midpoint.alpha=(double) QuantumRange/2.0; error.alpha=0.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; if (cube_info->nodes > MaxNodes) { /* Prune one level if the color tree is too large. */ PruneLevel(cube_info,cube_info->root); cube_info->depth--; } for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count) { /* Start at the root and descend the color cube tree. 
*/ for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,p,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,cube_info,p,&pixel); index=MaxTreeDepth-1; bisect=((double) QuantumRange+1.0)/2.0; mid=midpoint; node_info=cube_info->root; for (level=1; level <= MaxTreeDepth; level++) { double distance; bisect*=0.5; id=ColorToNodeId(cube_info,&pixel,index); mid.red+=(id & 1) != 0 ? bisect : -bisect; mid.green+=(id & 2) != 0 ? bisect : -bisect; mid.blue+=(id & 4) != 0 ? bisect : -bisect; mid.alpha+=(id & 8) != 0 ? bisect : -bisect; if (node_info->child[id] == (NodeInfo *) NULL) { /* Set colors of new node to contain pixel. */ node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info); if (node_info->child[id] == (NodeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); continue; } if (level == MaxTreeDepth) cube_info->colors++; } /* Approximate the quantization error represented by this node. */ node_info=node_info->child[id]; error.red=QuantumScale*(pixel.red-mid.red); error.green=QuantumScale*(pixel.green-mid.green); error.blue=QuantumScale*(pixel.blue-mid.blue); if (cube_info->associate_alpha != MagickFalse) error.alpha=QuantumScale*(pixel.alpha-mid.alpha); distance=(double) (error.red*error.red+error.green*error.green+ error.blue*error.blue+error.alpha*error.alpha); if (IsNaN(distance)) distance=0.0; node_info->quantize_error+=count*sqrt(distance); cube_info->root->quantize_error+=node_info->quantize_error; index--; } /* Sum RGB for this leaf for later derivation of the mean cube color. */ node_info->number_unique+=count; node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red); node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green); node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) node_info->total_color.alpha+=count*QuantumScale* ClampPixel(pixel.alpha); else node_info->total_color.alpha+=count*QuantumScale* ClampPixel((MagickRealType) OpaqueAlpha); p+=count*GetPixelChannels(image); } if (cube_info->colors > cube_info->maximum_colors) { PruneToCubeDepth(cube_info,cube_info->root); break; } proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) break; } for (y++; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; if (cube_info->nodes > MaxNodes) { /* Prune one level if the color tree is too large. */ PruneLevel(cube_info,cube_info->root); cube_info->depth--; } for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count) { /* Start at the root and descend the color cube tree. */ for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,p,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,cube_info,p,&pixel); index=MaxTreeDepth-1; bisect=((double) QuantumRange+1.0)/2.0; mid=midpoint; node_info=cube_info->root; for (level=1; level <= cube_info->depth; level++) { double distance; bisect*=0.5; id=ColorToNodeId(cube_info,&pixel,index); mid.red+=(id & 1) != 0 ? bisect : -bisect; mid.green+=(id & 2) != 0 ? 
bisect : -bisect; mid.blue+=(id & 4) != 0 ? bisect : -bisect; mid.alpha+=(id & 8) != 0 ? bisect : -bisect; if (node_info->child[id] == (NodeInfo *) NULL) { /* Set colors of new node to contain pixel. */ node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info); if (node_info->child[id] == (NodeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","%s", image->filename); continue; } if (level == cube_info->depth) cube_info->colors++; } /* Approximate the quantization error represented by this node. */ node_info=node_info->child[id]; error.red=QuantumScale*(pixel.red-mid.red); error.green=QuantumScale*(pixel.green-mid.green); error.blue=QuantumScale*(pixel.blue-mid.blue); if (cube_info->associate_alpha != MagickFalse) error.alpha=QuantumScale*(pixel.alpha-mid.alpha); distance=(double) (error.red*error.red+error.green*error.green+ error.blue*error.blue+error.alpha*error.alpha); if (IsNaN(distance) != MagickFalse) distance=0.0; node_info->quantize_error+=count*sqrt(distance); cube_info->root->quantize_error+=node_info->quantize_error; index--; } /* Sum RGB for this leaf for later derivation of the mean cube color. */ node_info->number_unique+=count; node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red); node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green); node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) node_info->total_color.alpha+=count*QuantumScale* ClampPixel(pixel.alpha); else node_info->total_color.alpha+=count*QuantumScale* ClampPixel((MagickRealType) OpaqueAlpha); p+=count*GetPixelChannels(image); } proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) break; } image_view=DestroyCacheView(image_view); if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception); return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneQuantizeInfo() makes a duplicate of the given quantize info structure, % or if quantize info is NULL, a new one. % % The format of the CloneQuantizeInfo method is: % % QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info) % % A description of each parameter follows: % % o clone_info: Method CloneQuantizeInfo returns a duplicate of the given % quantize info, or if image info is NULL a new one. % % o quantize_info: a structure of type info. 
% */ MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info) { QuantizeInfo *clone_info; clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info)); GetQuantizeInfo(clone_info); if (quantize_info == (QuantizeInfo *) NULL) return(clone_info); clone_info->number_colors=quantize_info->number_colors; clone_info->tree_depth=quantize_info->tree_depth; clone_info->dither_method=quantize_info->dither_method; clone_info->colorspace=quantize_info->colorspace; clone_info->measure_error=quantize_info->measure_error; return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o s e s t C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClosestColor() traverses the color cube tree at a particular node and % determines which colormap entry best represents the input color. % % The format of the ClosestColor method is: % % void ClosestColor(const Image *image,CubeInfo *cube_info, % const NodeInfo *node_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % % o node_info: the address of a structure of type NodeInfo which points to a % node in the color cube tree that is to be pruned. % */ static void ClosestColor(const Image *image,CubeInfo *cube_info, const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) ClosestColor(image,cube_info,node_info->child[i]); if (node_info->number_unique != 0) { double pixel; register double alpha, beta, distance; register DoublePixelPacket *magick_restrict q; register PixelInfo *magick_restrict p; /* Determine if this color is "closest". */ p=image->colormap+node_info->color_number; q=(&cube_info->target); alpha=1.0; beta=1.0; if (cube_info->associate_alpha != MagickFalse) { alpha=(double) (QuantumScale*p->alpha); beta=(double) (QuantumScale*q->alpha); } pixel=alpha*p->red-beta*q->red; distance=pixel*pixel; if (distance <= cube_info->distance) { pixel=alpha*p->green-beta*q->green; distance+=pixel*pixel; if (distance <= cube_info->distance) { pixel=alpha*p->blue-beta*q->blue; distance+=pixel*pixel; if (distance <= cube_info->distance) { if (cube_info->associate_alpha != MagickFalse) { pixel=p->alpha-q->alpha; distance+=pixel*pixel; } if (distance <= cube_info->distance) { cube_info->distance=distance; cube_info->color_number=node_info->color_number; } } } } } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p r e s s I m a g e C o l o r m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompressImageColormap() compresses an image colormap by removing any % duplicate or unused color entries. % % The format of the CompressImageColormap method is: % % MagickBooleanType CompressImageColormap(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType CompressImageColormap(Image *image, ExceptionInfo *exception) { QuantizeInfo quantize_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (IsPaletteImage(image) == MagickFalse) return(MagickFalse); GetQuantizeInfo(&quantize_info); quantize_info.number_colors=image->colors; quantize_info.tree_depth=MaxTreeDepth; return(QuantizeImage(&quantize_info,image,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e f i n e I m a g e C o l o r m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DefineImageColormap() traverses the color cube tree and notes each colormap % entry. A colormap entry is any node in the color cube tree where the % of unique colors is not zero. DefineImageColormap() returns the number of % colors in the image colormap. % % The format of the DefineImageColormap method is: % % size_t DefineImageColormap(Image *image,CubeInfo *cube_info, % NodeInfo *node_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % % o node_info: the address of a structure of type NodeInfo which points to a % node in the color cube tree that is to be pruned. % */ static size_t DefineImageColormap(Image *image,CubeInfo *cube_info, NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) (void) DefineImageColormap(image,cube_info,node_info->child[i]); if (node_info->number_unique != 0) { register double alpha; register PixelInfo *magick_restrict q; /* Colormap entry is defined by the mean color in this cube. 
*/ q=image->colormap+image->colors; alpha=(double) ((MagickOffsetType) node_info->number_unique); alpha=PerceptibleReciprocal(alpha); if (cube_info->associate_alpha == MagickFalse) { q->red=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.red); q->green=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.green); q->blue=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.blue); q->alpha=(double) OpaqueAlpha; } else { double opacity; opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha); q->alpha=(double) ClampToQuantum(opacity); if (q->alpha == OpaqueAlpha) { q->red=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.red); q->green=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.green); q->blue=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.blue); } else { double gamma; gamma=(double) (QuantumScale*q->alpha); gamma=PerceptibleReciprocal(gamma); q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange* node_info->total_color.red); q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange* node_info->total_color.green); q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange* node_info->total_color.blue); if (node_info->number_unique > cube_info->transparent_pixels) { cube_info->transparent_pixels=node_info->number_unique; cube_info->transparent_index=(ssize_t) image->colors; } } } node_info->color_number=image->colors++; } return(image->colors); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y C u b e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyCubeInfo() deallocates memory associated with an image. % % The format of the DestroyCubeInfo method is: % % DestroyCubeInfo(CubeInfo *cube_info) % % A description of each parameter follows: % % o cube_info: the address of a structure of type CubeInfo. % */ static void DestroyCubeInfo(CubeInfo *cube_info) { register Nodes *nodes; /* Release color cube tree storage. */ do { nodes=cube_info->node_queue->next; cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory( cube_info->node_queue->nodes); cube_info->node_queue=(Nodes *) RelinquishMagickMemory( cube_info->node_queue); cube_info->node_queue=nodes; } while (cube_info->node_queue != (Nodes *) NULL); if (cube_info->memory_info != (MemoryInfo *) NULL) cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info); cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info); cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo % structure. % % The format of the DestroyQuantizeInfo method is: % % QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. 
% */ MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info) { (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(quantize_info != (QuantizeInfo *) NULL); assert(quantize_info->signature == MagickCoreSignature); quantize_info->signature=(~MagickCoreSignature); quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info); return(quantize_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D i t h e r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DitherImage() distributes the difference between an original image and % the corresponding color reduced algorithm to neighboring pixels using % serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns % MagickTrue if the image is dithered otherwise MagickFalse. % % The format of the DitherImage method is: % % MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % % o exception: return any errors or warnings in this structure. % */ static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels) { register ssize_t i; assert(pixels != (DoublePixelPacket **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixels[i] != (DoublePixelPacket *) NULL) pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]); pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels); return(pixels); } static DoublePixelPacket **AcquirePixelThreadSet(const size_t count) { DoublePixelPacket **pixels; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads, sizeof(*pixels)); if (pixels == (DoublePixelPacket **) NULL) return((DoublePixelPacket **) NULL); (void) memset(pixels,0,number_threads*sizeof(*pixels)); for (i=0; i < (ssize_t) number_threads; i++) { pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2* sizeof(**pixels)); if (pixels[i] == (DoublePixelPacket *) NULL) return(DestroyPixelThreadSet(pixels)); } return(pixels); } static inline ssize_t CacheOffset(CubeInfo *cube_info, const DoublePixelPacket *pixel) { #define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift))) #define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift))) #define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift))) #define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift))) ssize_t offset; offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) | GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) | BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue)))); if (cube_info->associate_alpha != MagickFalse) offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha))); return(offset); } static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CacheView *image_view; const char *artifact; double amount; DoublePixelPacket **pixels; MagickBooleanType status; ssize_t y; /* Distribute quantization error using Floyd-Steinberg. 
*/ pixels=AcquirePixelThreadSet(image->columns); if (pixels == (DoublePixelPacket **) NULL) return(MagickFalse); status=MagickTrue; amount=1.0; artifact=GetImageArtifact(image,"dither:diffusion-amount"); if (artifact != (const char *) NULL) amount=StringToDoubleInterval(artifact,1.0); image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); CubeInfo cube; DoublePixelPacket *current, *previous; register Quantum *magick_restrict q; register ssize_t x; size_t index; ssize_t v; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } cube=(*cube_info); current=pixels[id]+(y & 0x01)*image->columns; previous=pixels[id]+((y+1) & 0x01)*image->columns; v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1); for (x=0; x < (ssize_t) image->columns; x++) { DoublePixelPacket color, pixel; register ssize_t i; ssize_t u; u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x; AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel); if (x > 0) { pixel.red+=7.0*amount*current[u-v].red/16; pixel.green+=7.0*amount*current[u-v].green/16; pixel.blue+=7.0*amount*current[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=7.0*amount*current[u-v].alpha/16; } if (y > 0) { if (x < (ssize_t) (image->columns-1)) { pixel.red+=previous[u+v].red/16; pixel.green+=previous[u+v].green/16; pixel.blue+=previous[u+v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=previous[u+v].alpha/16; } pixel.red+=5.0*amount*previous[u].red/16; pixel.green+=5.0*amount*previous[u].green/16; pixel.blue+=5.0*amount*previous[u].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=5.0*amount*previous[u].alpha/16; if (x > 0) { pixel.red+=3.0*amount*previous[u-v].red/16; pixel.green+=3.0*amount*previous[u-v].green/16; pixel.blue+=3.0*amount*previous[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=3.0*amount*previous[u-v].alpha/16; } } pixel.red=(double) ClampPixel(pixel.red); pixel.green=(double) ClampPixel(pixel.green); pixel.blue=(double) ClampPixel(pixel.blue); if (cube.associate_alpha != MagickFalse) pixel.alpha=(double) ClampPixel(pixel.alpha); i=CacheOffset(&cube,&pixel); if (cube.cache[i] < 0) { register NodeInfo *node_info; register size_t node_id; /* Identify the deepest node containing the pixel's color. */ node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { node_id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[node_id] == (NodeInfo *) NULL) break; node_info=node_info->child[node_id]; } /* Find closest color among siblings and their children. */ cube.target=pixel; cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+ 1.0); ClosestColor(image,&cube,node_info->parent); cube.cache[i]=(ssize_t) cube.color_number; } /* Assign pixel to closest colormap entry. 
*/ index=(size_t) cube.cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image)); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum(image->colormap[index].red), q+u*GetPixelChannels(image)); SetPixelGreen(image,ClampToQuantum(image->colormap[index].green), q+u*GetPixelChannels(image)); SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue), q+u*GetPixelChannels(image)); if (cube.associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha), q+u*GetPixelChannels(image)); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; /* Store the error. */ AssociateAlphaPixelInfo(&cube,image->colormap+index,&color); current[u].red=pixel.red-color.red; current[u].green=pixel.green-color.green; current[u].blue=pixel.blue-color.blue; if (cube.associate_alpha != MagickFalse) current[u].alpha=pixel.alpha-color.alpha; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } image_view=DestroyCacheView(image_view); pixels=DestroyPixelThreadSet(pixels); return(MagickTrue); } static MagickBooleanType RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int, ExceptionInfo *); static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info, const size_t level,const unsigned int direction,ExceptionInfo *exception) { if (level == 1) switch (direction) { case WestGravity: { (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); break; } case EastGravity: { (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); break; } case NorthGravity: { (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); break; } case SouthGravity: { (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); break; } default: break; } else switch (direction) { case WestGravity: { Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); break; } case EastGravity: { Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); 
Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); break; } case NorthGravity: { Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); break; } case SouthGravity: { Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); break; } default: break; } } static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view, CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" DoublePixelPacket color, pixel; MagickBooleanType proceed; register CubeInfo *p; size_t index; p=cube_info; if ((p->x >= 0) && (p->x < (ssize_t) image->columns) && (p->y >= 0) && (p->y < (ssize_t) image->rows)) { register Quantum *magick_restrict q; register ssize_t i; /* Distribute error. */ q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception); if (q == (Quantum *) NULL) return(MagickFalse); AssociateAlphaPixel(image,cube_info,q,&pixel); for (i=0; i < ErrorQueueLength; i++) { pixel.red+=p->weights[i]*p->error[i].red; pixel.green+=p->weights[i]*p->error[i].green; pixel.blue+=p->weights[i]*p->error[i].blue; if (cube_info->associate_alpha != MagickFalse) pixel.alpha+=p->weights[i]*p->error[i].alpha; } pixel.red=(double) ClampPixel(pixel.red); pixel.green=(double) ClampPixel(pixel.green); pixel.blue=(double) ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) pixel.alpha=(double) ClampPixel(pixel.alpha); i=CacheOffset(cube_info,&pixel); if (p->cache[i] < 0) { register NodeInfo *node_info; register size_t id; /* Identify the deepest node containing the pixel's color. */ node_info=p->root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(cube_info,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. */ p->target=pixel; p->distance=(double) (4.0*(QuantumRange+1.0)*((double) QuantumRange+1.0)+1.0); ClosestColor(image,p,node_info->parent); p->cache[i]=(ssize_t) p->color_number; } /* Assign pixel to closest colormap entry. 
*/ index=(size_t) p->cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q); if (cube_info->quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q); SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q); SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q); if (cube_info->associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) return(MagickFalse); /* Propagate the error as the last entry of the error queue. */ (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)* sizeof(p->error[0])); AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color); p->error[ErrorQueueLength-1].red=pixel.red-color.red; p->error[ErrorQueueLength-1].green=pixel.green-color.green; p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue; if (cube_info->associate_alpha != MagickFalse) p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha; proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span); if (proceed == MagickFalse) return(MagickFalse); p->offset++; } switch (direction) { case WestGravity: p->x--; break; case EastGravity: p->x++; break; case NorthGravity: p->y--; break; case SouthGravity: p->y++; break; } return(MagickTrue); } static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; register ssize_t i; size_t depth; if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod) return(FloydSteinbergDither(image,cube_info,exception)); /* Distribute quantization error along a Hilbert curve. */ (void) memset(cube_info->error,0,ErrorQueueLength* sizeof(*cube_info->error)); cube_info->x=0; cube_info->y=0; i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows); for (depth=1; i != 0; depth++) i>>=1; if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t) image->rows)) depth++; cube_info->offset=0; cube_info->span=(MagickSizeType) image->columns*image->rows; image_view=AcquireAuthenticCacheView(image,exception); if (depth > 1) Riemersma(image,image_view,cube_info,depth-1,NorthGravity,exception); status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t C u b e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetCubeInfo() initializes the Cube data structure. % % The format of the GetCubeInfo method is: % % CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info, % const size_t depth,const size_t maximum_colors) % % A description of each parameter follows. % % o quantize_info: Specifies a pointer to a QuantizeInfo structure. % % o depth: Normally, this integer value is zero or one. A zero or % one tells Quantize to choose an optimal tree depth of Log4(number_colors). % A tree of this depth generally allows the best representation of the % reference image with the least amount of memory and the fastest % computational speed. In some cases, such as an image with low color % dispersion (a small number of colors), a value other than % Log4(number_colors) is required. To expand the color tree completely, % use a value of 8. % % o maximum_colors: the maximum number of colors allowed in the output image.
% */ static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info, const size_t depth,const size_t maximum_colors) { CubeInfo *cube_info; double sum, weight; register ssize_t i; size_t length; /* Initialize tree to describe color cube_info. */ cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info)); if (cube_info == (CubeInfo *) NULL) return((CubeInfo *) NULL); (void) memset(cube_info,0,sizeof(*cube_info)); cube_info->depth=depth; if (cube_info->depth > MaxTreeDepth) cube_info->depth=MaxTreeDepth; if (cube_info->depth < 2) cube_info->depth=2; cube_info->maximum_colors=maximum_colors; /* Initialize root node. */ cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL); if (cube_info->root == (NodeInfo *) NULL) return((CubeInfo *) NULL); cube_info->root->parent=cube_info->root; cube_info->quantize_info=CloneQuantizeInfo(quantize_info); if (cube_info->quantize_info->dither_method == NoDitherMethod) return(cube_info); /* Initialize dither resources. */ length=(size_t) (1UL << (4*(8-CacheShift))); cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache)); if (cube_info->memory_info == (MemoryInfo *) NULL) return((CubeInfo *) NULL); cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info); /* Initialize color cache. */ (void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)* length); /* Distribute weights along a curve of exponential decay. */ weight=1.0; for (i=0; i < ErrorQueueLength; i++) { cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight); weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0)); } /* Normalize the weighting factors. */ weight=0.0; for (i=0; i < ErrorQueueLength; i++) weight+=cube_info->weights[i]; sum=0.0; for (i=0; i < ErrorQueueLength; i++) { cube_info->weights[i]/=weight; sum+=cube_info->weights[i]; } cube_info->weights[0]+=1.0-sum; return(cube_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t N o d e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNodeInfo() allocates memory for a new node in the color cube tree and % presets all fields to zero. % % The format of the GetNodeInfo method is: % % NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id, % const size_t level,NodeInfo *parent) % % A description of each parameter follows. % % o node: The GetNodeInfo method returns a pointer to a queue of nodes. % % o id: Specifies the child number of the node. % % o level: Specifies the level in the storage_class the node resides. % */ static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id, const size_t level,NodeInfo *parent) { NodeInfo *node_info; if (cube_info->free_nodes == 0) { Nodes *nodes; /* Allocate a new queue of nodes. 
*/ nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes)); if (nodes == (Nodes *) NULL) return((NodeInfo *) NULL); nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList, sizeof(*nodes->nodes)); if (nodes->nodes == (NodeInfo *) NULL) return((NodeInfo *) NULL); nodes->next=cube_info->node_queue; cube_info->node_queue=nodes; cube_info->next_node=nodes->nodes; cube_info->free_nodes=NodesInAList; } cube_info->nodes++; cube_info->free_nodes--; node_info=cube_info->next_node++; (void) memset(node_info,0,sizeof(*node_info)); node_info->parent=parent; node_info->id=id; node_info->level=level; return(node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e Q u a n t i z e E r r o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageQuantizeError() measures the difference between the original % and quantized images. This difference is the total quantization error. % The error is computed by summing over all pixels in an image the distance % squared in RGB space between each reference pixel value and its quantized % value. These values are computed: % % o mean_error_per_pixel: This value is the mean error for any single % pixel in the image. % % o normalized_mean_square_error: This value is the normalized mean % quantization error for any single pixel in the image. This distance % measure is normalized to a range between 0 and 1. It is independent % of the range of red, green, and blue values in the image. % % o normalized_maximum_square_error: This value is the normalized % maximum quantization error for any single pixel in the image. This % distance measure is normalized to a range between 0 and 1. It is % independent of the range of red, green, and blue values in the image. % % The format of the GetImageQuantizeError method is: % % MagickBooleanType GetImageQuantizeError(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o exception: return any errors or warnings in this structure.
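%
%  As implemented in the code below (with per-channel distance
%  d = |alpha*pixel_channel - beta*colormap[index]_channel| and
%  area = 3*columns*rows, where alpha and beta apply the pixel and colormap
%  alpha only when the image has a blended alpha channel):
%
%    mean_error_per_pixel     = sum(d) / area
%    normalized_mean_error    = QuantumScale^2 * sum(d^2) / area
%    normalized_maximum_error = QuantumScale * max(d)
%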
% */ MagickExport MagickBooleanType GetImageQuantizeError(Image *image, ExceptionInfo *exception) { CacheView *image_view; double alpha, area, beta, distance, maximum_error, mean_error, mean_error_per_pixel; size_t index; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); image->total_colors=GetNumberColors(image,(FILE *) NULL,exception); (void) memset(&image->error,0,sizeof(image->error)); if (image->storage_class == DirectClass) return(MagickTrue); alpha=1.0; beta=1.0; area=3.0*image->columns*image->rows; maximum_error=0.0; mean_error_per_pixel=0.0; mean_error=0.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { index=GetPixelIndex(image,p); if (image->alpha_trait == BlendPixelTrait) { alpha=(double) (QuantumScale*GetPixelAlpha(image,p)); beta=(double) (QuantumScale*image->colormap[index].alpha); } distance=fabs((double) (alpha*GetPixelRed(image,p)-beta* image->colormap[index].red)); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta* image->colormap[index].green)); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta* image->colormap[index].blue)); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area; image->error.normalized_mean_error=(double) QuantumScale*QuantumScale* mean_error/area; image->error.normalized_maximum_error=(double) QuantumScale*maximum_error; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetQuantizeInfo() initializes the QuantizeInfo structure. % % The format of the GetQuantizeInfo method is: % % GetQuantizeInfo(QuantizeInfo *quantize_info) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to a QuantizeInfo structure. % */ MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info) { (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(quantize_info != (QuantizeInfo *) NULL); (void) memset(quantize_info,0,sizeof(*quantize_info)); quantize_info->number_colors=256; quantize_info->dither_method=RiemersmaDitherMethod; quantize_info->colorspace=UndefinedColorspace; quantize_info->measure_error=MagickFalse; quantize_info->signature=MagickCoreSignature; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o s t e r i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PosterizeImage() reduces the image to a limited number of colors for a % "poster" effect. 
% % The format of the PosterizeImage method is: % % MagickBooleanType PosterizeImage(Image *image,const size_t levels, % const DitherMethod dither_method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: Specifies a pointer to an Image structure. % % o levels: Number of color levels allowed in each channel. Very low values % (2, 3, or 4) have the most visible effect. % % o dither_method: choose from UndefinedDitherMethod, NoDitherMethod, % RiemersmaDitherMethod, FloydSteinbergDitherMethod. % % o exception: return any errors or warnings in this structure. % */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. */ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels, const DitherMethod dither_method,ExceptionInfo *exception) { #define PosterizeImageTag "Posterize/Image" #define PosterizePixel(pixel) (Quantum) (QuantumRange*(MagickRound( \ QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1)) CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; QuantizeInfo *quantize_info; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (image->storage_class == PseudoClass) #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->colors,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { /* Posterize colormap. */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(double) PosterizePixel(image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(double) PosterizePixel(image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(double) PosterizePixel(image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(double) PosterizePixel(image->colormap[i].alpha); } /* Posterize image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q); if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q); if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait == BlendPixelTrait)) SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL); quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels* levels,MaxColormapSize+1); quantize_info->dither_method=dither_method; quantize_info->tree_depth=MaxTreeDepth; status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P r u n e C h i l d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PruneChild() deletes the given node and merges its statistics into its % parent. % % The format of the PruneChild method is: % % PruneChild(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info) { NodeInfo *parent; register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneChild(cube_info,node_info->child[i]); /* Merge color statistics into parent.
*/ parent=node_info->parent; parent->number_unique+=node_info->number_unique; parent->total_color.red+=node_info->total_color.red; parent->total_color.green+=node_info->total_color.green; parent->total_color.blue+=node_info->total_color.blue; parent->total_color.alpha+=node_info->total_color.alpha; parent->child[node_info->id]=(NodeInfo *) NULL; cube_info->nodes--; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P r u n e L e v e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PruneLevel() deletes all nodes at the bottom level of the color tree merging % their color statistics into their parent node. % % The format of the PruneLevel method is: % % PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneLevel(cube_info,node_info->child[i]); if (node_info->level == cube_info->depth) PruneChild(cube_info,node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P r u n e T o C u b e D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PruneToCubeDepth() deletes any nodes at a depth greater than % cube_info->depth while merging their color statistics into their parent % node. % % The format of the PruneToCubeDepth method is: % % PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneToCubeDepth(cube_info,node_info->child[i]); if (node_info->level > cube_info->depth) PruneChild(cube_info,node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u a n t i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeImage() analyzes the colors within a reference image and chooses a % fixed number of colors to represent the image. The goal of the algorithm % is to minimize the color difference between the input and output image while % minimizing the processing time. % % The format of the QuantizeImage method is: % % MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info, % Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info, Image *image,ExceptionInfo *exception) { CubeInfo *cube_info; MagickBooleanType status; size_t depth, maximum_colors; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; if (image->alpha_trait != BlendPixelTrait) { if (SetImageGray(image,exception) != MagickFalse) (void) SetGrayscaleImage(image,exception); } if ((image->storage_class == PseudoClass) && (image->colors <= maximum_colors)) { if ((quantize_info->colorspace != UndefinedColorspace) && (quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace(image,quantize_info->colorspace, exception); return(MagickTrue); } depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2)) depth--; if ((image->alpha_trait == BlendPixelTrait) && (depth > 5)) depth--; if (SetImageGray(image,exception) != MagickFalse) depth=MaxTreeDepth; } /* Initialize color cube. */ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,image,exception); if (status != MagickFalse) { /* Reduce the number of colors in the image if it contains more than the maximum, otherwise we can disable dithering to improve the performance. */ if (cube_info->colors > cube_info->maximum_colors) ReduceImageColors(image,cube_info); else cube_info->quantize_info->dither_method=NoDitherMethod; status=AssignImageColors(image,cube_info,exception); } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u a n t i z e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeImages() analyzes the colors within a set of reference images and % chooses a fixed number of colors to represent the set. The goal of the % algorithm is to minimize the color difference between the input and output % images while minimizing the processing time. % % The format of the QuantizeImages method is: % % MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, % Image *images,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: Specifies a pointer to a list of Image structures. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, Image *images,ExceptionInfo *exception) { CubeInfo *cube_info; Image *image; MagickBooleanType proceed, status; MagickProgressMonitor progress_monitor; register ssize_t i; size_t depth, maximum_colors, number_images; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickCoreSignature); assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (GetNextImageInList(images) == (Image *) NULL) { /* Handle a single image with QuantizeImage. */ status=QuantizeImage(quantize_info,images,exception); return(status); } status=MagickFalse; maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if (quantize_info->dither_method != NoDitherMethod) depth--; } /* Initialize color cube. */ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return(MagickFalse); } number_images=GetImageListLength(images); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL, image->client_data); status=ClassifyImageColors(cube_info,image,exception); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor,image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } if (status != MagickFalse) { /* Reduce the number of colors in an image sequence. */ ReduceImageColors(images,cube_info); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,image->client_data); status=AssignImageColors(image,cube_info,exception); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor, image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Q u a n t i z e E r r o r F l a t t e n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeErrorFlatten() traverses the color cube and flattens the quantization % error into a sorted 1D array. This accelerates the color reduction process. % % Contributed by Yoya. % % The format of the QuantizeErrorFlatten method is: % % size_t QuantizeErrorFlatten(const CubeInfo *cube_info, % const NodeInfo *node_info,const ssize_t offset, % double *quantize_error) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is current pointer. 
% % o offset: quantize error offset. % % o quantize_error: the quantization error vector. % */ static size_t QuantizeErrorFlatten(const CubeInfo *cube_info, const NodeInfo *node_info,const ssize_t offset,double *quantize_error) { register ssize_t i; size_t n, number_children; if (offset >= (ssize_t) cube_info->nodes) return(0); quantize_error[offset]=node_info->quantize_error; n=1; number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children ; i++) if (node_info->child[i] != (NodeInfo *) NULL) n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n, quantize_error); return(n); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Reduce() traverses the color cube tree and prunes any node whose % quantization error falls below a particular threshold. % % The format of the Reduce method is: % % Reduce(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) Reduce(cube_info,node_info->child[i]); if (node_info->quantize_error <= cube_info->pruning_threshold) PruneChild(cube_info,node_info); else { /* Find minimum pruning threshold. */ if (node_info->number_unique > 0) cube_info->colors++; if (node_info->quantize_error < cube_info->next_threshold) cube_info->next_threshold=node_info->quantize_error; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReduceImageColors() repeatedly prunes the tree until the number of nodes % with n2 > 0 is less than or equal to the maximum number of colors allowed % in the output image. On any given iteration over the tree, it selects % those nodes whose E value is minimal for pruning and merges their % color statistics upward. It uses a pruning threshold, Ep, to govern % node selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixels' colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except at the leaves of % the tree which represent colors present in the input image.
% % The other pixel count, n1, indicates the total number of colors % within the cubic volume which the node represents. This includes n1 - % n2 pixels whose colors should be defined by nodes at a lower level in % the tree. % % The format of the ReduceImageColors method is: % % ReduceImageColors(const Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % */ static int QuantizeErrorCompare(const void *error_p,const void *error_q) { double *p, *q; p=(double *) error_p; q=(double *) error_q; if (*p > *q) return(1); if (fabs(*q-*p) <= MagickEpsilon) return(0); return(-1); } static void ReduceImageColors(const Image *image,CubeInfo *cube_info) { #define ReduceImageTag "Reduce/Image" MagickBooleanType proceed; MagickOffsetType offset; size_t span; cube_info->next_threshold=0.0; if (cube_info->colors > cube_info->maximum_colors) { double *quantize_error; /* Enable rapid reduction of the number of unique colors. */ quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes, sizeof(*quantize_error)); if (quantize_error != (double *) NULL) { (void) QuantizeErrorFlatten(cube_info,cube_info->root,0, quantize_error); qsort(quantize_error,cube_info->nodes,sizeof(double), QuantizeErrorCompare); if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100)) cube_info->next_threshold=quantize_error[cube_info->nodes-110* (cube_info->maximum_colors+1)/100]; quantize_error=(double *) RelinquishMagickMemory(quantize_error); } } for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; ) { cube_info->pruning_threshold=cube_info->next_threshold; cube_info->next_threshold=cube_info->root->quantize_error-1; cube_info->colors=0; Reduce(cube_info,cube_info->root); offset=(MagickOffsetType) span-cube_info->colors; proceed=SetImageProgress(image,ReduceImageTag,offset,span- cube_info->maximum_colors+1); if (proceed == MagickFalse) break; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImage() replaces the colors of an image with the closest of the colors % from the reference image. % % The format of the RemapImage method is: % % MagickBooleanType RemapImage(const QuantizeInfo *quantize_info, % Image *image,const Image *remap_image,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o image: the image. % % o remap_image: the reference image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info, Image *image,const Image *remap_image,ExceptionInfo *exception) { CubeInfo *cube_info; MagickBooleanType status; /* Initialize color cube. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(remap_image != (Image *) NULL); assert(remap_image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cube_info=GetCubeInfo(quantize_info,MaxTreeDepth, quantize_info->number_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,remap_image,exception); if (status != MagickFalse) { /* Classify image colors from the reference image. */ cube_info->quantize_info->number_colors=cube_info->colors; status=AssignImageColors(image,cube_info,exception); } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImages() replaces the colors of a sequence of images with the % closest color from a reference image. % % The format of the RemapImage method is: % % MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, % Image *images,Image *remap_image,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: the image sequence. % % o remap_image: the reference image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, Image *images,const Image *remap_image,ExceptionInfo *exception) { CubeInfo *cube_info; Image *image; MagickBooleanType status; assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=images; if (remap_image == (Image *) NULL) { /* Create a global colormap for an image sequence. */ status=QuantizeImages(quantize_info,images,exception); return(status); } /* Classify image colors from the reference image. */ cube_info=GetCubeInfo(quantize_info,MaxTreeDepth, quantize_info->number_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,remap_image,exception); if (status != MagickFalse) { /* Classify image colors from the reference image. */ cube_info->quantize_info->number_colors=cube_info->colors; image=images; for ( ; image != (Image *) NULL; image=GetNextImageInList(image)) { status=AssignImageColors(image,cube_info,exception); if (status == MagickFalse) break; } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t G r a y s c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetGrayscaleImage() converts an image to a PseudoClass grayscale image. % % The format of the SetGrayscaleImage method is: % % MagickBooleanType SetGrayscaleImage(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. 
% % o exception: return any errors or warnings in this structure. % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { double intensity; PixelInfo *color_1, *color_2; color_1=(PixelInfo *) x; color_2=(PixelInfo *) y; intensity=GetPixelInfoIntensity((const Image *) NULL,color_1)- GetPixelInfoIntensity((const Image *) NULL,color_2); return((int) intensity); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static MagickBooleanType SetGrayscaleImage(Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; PixelInfo *colormap; register ssize_t i; ssize_t *colormap_index, j, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->type != GrayscaleType) (void) TransformImageColorspace(image,GRAYColorspace,exception); if (image->storage_class == PseudoClass) colormap_index=(ssize_t *) AcquireQuantumMemory(MagickMax(image->colors+1, MaxMap),sizeof(*colormap_index)); else colormap_index=(ssize_t *) AcquireQuantumMemory(MagickMax(MaxColormapSize+1, MaxMap),sizeof(*colormap_index)); if (colormap_index == (ssize_t *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); if (image->storage_class != PseudoClass) { (void) memset(colormap_index,(-1),MaxColormapSize* sizeof(*colormap_index)); if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse) { colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } image->colors=0; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register size_t intensity; intensity=ScaleQuantumToMap(GetPixelRed(image,q)); if (colormap_index[intensity] < 0) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SetGrayscaleImage) #endif if (colormap_index[intensity] < 0) { colormap_index[intensity]=(ssize_t) image->colors; image->colormap[image->colors].red=(double) GetPixelRed(image,q); image->colormap[image->colors].green=(double) GetPixelGreen(image,q); image->colormap[image->colors].blue=(double) GetPixelBlue(image,q); image->colors++; } } SetPixelIndex(image,(Quantum) colormap_index[intensity],q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); } for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].alpha=(double) i; qsort((void *) image->colormap,image->colors,sizeof(PixelInfo), IntensityCompare); colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,sizeof(*colormap)); if (colormap == (PixelInfo *) NULL) { colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } j=0; colormap[j]=image->colormap[0]; for (i=0; i < (ssize_t) image->colors; i++) { if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse) { j++; 
colormap[j]=image->colormap[i]; } colormap_index[(ssize_t) image->colormap[i].alpha]=j; } image->colors=(size_t) (j+1); image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap); image->colormap=colormap; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap( GetPixelIndex(image,q))],q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); image->type=GrayscaleType; if (SetImageMonochrome(image,exception) != MagickFalse) image->type=BilevelType; return(status); }
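/*
  Editor's usage sketch (illustrative only, not part of MagickCore): a minimal
  example of driving the quantization API defined above.  It assumes the
  MagickCore public headers are included and that `image' and `exception' are
  an Image and ExceptionInfo supplied by the caller; the wrapper name
  ReduceTo16Colors is hypothetical.  Guarded with #if 0 so it is never
  compiled as part of this translation unit.
*/
#if 0
static MagickBooleanType ReduceTo16Colors(Image *image,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  if (quantize_info == (QuantizeInfo *) NULL)
    return(MagickFalse);
  quantize_info->number_colors=16;                          /* target colormap size */
  quantize_info->dither_method=FloydSteinbergDitherMethod;  /* serpentine error diffusion above */
  quantize_info->measure_error=MagickTrue;                  /* request quantization error statistics */
  status=QuantizeImage(quantize_info,image,exception);      /* classify, reduce, assign */
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}
#endif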
cvAdvDiff_bnd_omp.c
/* ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds and Ting Yan @ SMU * Based on cvAdvDiff_bnd.c and parallelized with OpenMP * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2021, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * Example problem: * * The following is a simple example problem with a banded Jacobian, * solved using CVODE. * The problem is the semi-discrete form of the advection-diffusion * equation in 2-D: * du/dt = d^2 u / dx^2 + .5 du/dx + d^2 u / dy^2 * on the rectangle 0 <= x <= 2, 0 <= y <= 1, and the time * interval 0 <= t <= 1. Homogeneous Dirichlet boundary conditions * are posed, and the initial condition is * u(x,y,t=0) = x(2-x)y(1-y)exp(5xy). * The PDE is discretized on a uniform MX+2 by MY+2 grid with * central differencing, and with boundary values eliminated, * leaving an ODE system of size NEQ = MX*MY. * This program solves the problem with the BDF method, Newton * iteration with the SUNBAND linear solver, and a user-supplied * Jacobian routine. * It uses scalar relative and absolute tolerances. * Output is printed at t = .1, .2, ..., 1. * Run statistics (optional outputs) are printed at the end. * * Optionally, we can set the number of threads from environment * variable or command line. To check the current value for number * of threads from environment: * % echo $OMP_NUM_THREADS * * Execution: * * To use the default value or the number of threads from the * environment value, run without arguments: * % ./cvAdvDiff_bnd_omp * The environment variable can be over-ridden with a command line * argument specifying the number of threads to use, e.g: * % ./cvAdvDiff_bnd_omp 5 * ----------------------------------------------------------------- */ #include <stdio.h> #include <stdlib.h> #include <math.h> /* Header files with a description of contents */ #include <cvode/cvode.h> /* prototypes for CVODE fcts., consts. */ #include <nvector/nvector_openmp.h> /* serial N_Vector types, fcts., macros */ #include <sunmatrix/sunmatrix_band.h> /* access to band SUNMatrix */ #include <sunlinsol/sunlinsol_band.h> /* access to band SUNLinearSolver */ #include <sundials/sundials_types.h> /* definition of type realtype */ #ifdef _OPENMP #include <omp.h> #endif /* Problem Constants */ #define XMAX RCONST(2.0) /* domain boundaries */ #define YMAX RCONST(1.0) #define MX 10 /* mesh dimensions */ #define MY 5 #define NEQ MX*MY /* number of equations */ #define ATOL RCONST(1.0e-5) /* scalar absolute tolerance */ #define T0 RCONST(0.0) /* initial time */ #define T1 RCONST(0.1) /* first output time */ #define DTOUT RCONST(0.1) /* output time increment */ #define NOUT 10 /* number of output times */ #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define TWO RCONST(2.0) #define FIVE RCONST(5.0) /* User-defined vector access macro IJth */ /* IJth is defined in order to isolate the translation from the mathematical 2-dimensional structure of the dependent variable vector to the underlying 1-dimensional storage. IJth(vdata,i,j) references the element in the vdata array for u at mesh point (i,j), where 1 <= i <= MX, 1 <= j <= MY. 
The vdata array is obtained via the macro call vdata = NV_DATA_OMP(v), where v is an N_Vector. The variables are ordered by the y index j, then by the x index i. */ #define IJth(vdata,i,j) (vdata[(j-1) + (i-1)*MY]) /* Type : UserData (contains grid constants) */ typedef struct { realtype dx, dy, hdcoef, hacoef, vdcoef; int nthreads; } *UserData; /* Private Helper Functions */ static void SetIC(N_Vector u, UserData data); static void PrintHeader(realtype reltol, realtype abstol, realtype umax); static void PrintOutput(realtype t, realtype umax, long int nst); static void PrintFinalStats(void *cvode_mem); /* Private function to check function return values */ static int check_retval(void *returnvalue, const char *funcname, int opt); /* Functions Called by the Solver */ static int f(realtype t, N_Vector u, N_Vector udot, void *user_data); static int Jac(realtype t, N_Vector u, N_Vector fu, SUNMatrix J, void *user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3); /* *------------------------------- * Main Program *------------------------------- */ int main(int argc, char *argv[]) { realtype dx, dy, reltol, abstol, t, tout, umax; N_Vector u; UserData data; SUNMatrix A; SUNLinearSolver LS; void *cvode_mem; int iout, retval; long int nst; int num_threads; u = NULL; data = NULL; A = NULL; LS = NULL; cvode_mem = NULL; /* Set the number of threads to use */ num_threads = 1; /* default value */ #ifdef _OPENMP num_threads = omp_get_max_threads(); /* Overwrite with OMP_NUM_THREADS environment variable */ #endif if (argc > 1) /* overwrite with command line value, if supplied */ num_threads = (int) strtol(argv[1], NULL, 0); /* Create an OpenMP vector */ u = N_VNew_OpenMP(NEQ, num_threads); /* Allocate u vector */ if(check_retval((void*)u, "N_VNew_OpenMP", 0)) return(1); reltol = ZERO; /* Set the tolerances */ abstol = ATOL; data = (UserData) malloc(sizeof *data); /* Allocate data memory */ if(check_retval((void *)data, "malloc", 2)) return(1); dx = data->dx = XMAX/(MX+1); /* Set grid coefficients in data */ dy = data->dy = YMAX/(MY+1); data->hdcoef = ONE/(dx*dx); data->hacoef = HALF/(TWO*dx); data->vdcoef = ONE/(dy*dy); data->nthreads = num_threads; SetIC(u, data); /* Initialize u vector */ /* Call CVodeCreate to create the solver memory and specify the * Backward Differentiation Formula */ cvode_mem = CVodeCreate(CV_BDF); if(check_retval((void *)cvode_mem, "CVodeCreate", 0)) return(1); /* Call CVodeInit to initialize the integrator memory and specify the * user's right hand side function in u'=f(t,u), the initial time T0, and * the initial dependent variable vector u.
*/ retval = CVodeInit(cvode_mem, f, T0, u); if(check_retval(&retval, "CVodeInit", 1)) return(1); /* Call CVodeSStolerances to specify the scalar relative tolerance * and scalar absolute tolerance */ retval = CVodeSStolerances(cvode_mem, reltol, abstol); if (check_retval(&retval, "CVodeSStolerances", 1)) return(1); /* Set the pointer to user-defined data */ retval = CVodeSetUserData(cvode_mem, data); if(check_retval(&retval, "CVodeSetUserData", 1)) return(1); /* Create banded SUNMatrix for use in linear solves -- since this will be factored, set the storage bandwidth to be the sum of upper and lower bandwidths */ A = SUNBandMatrix(NEQ, MY, MY); if(check_retval((void *)A, "SUNBandMatrix", 0)) return(1); /* Create banded SUNLinearSolver object for use by CVode */ LS = SUNLinSol_Band(u, A); if(check_retval((void *)LS, "SUNLinSol_Band", 0)) return(1); /* Call CVodeSetLinearSolver to attach the matrix and linear solver to CVode */ retval = CVodeSetLinearSolver(cvode_mem, LS, A); if(check_retval(&retval, "CVodeSetLinearSolver", 1)) return(1); /* Set the user-supplied Jacobian routine Jac */ retval = CVodeSetJacFn(cvode_mem, Jac); if(check_retval(&retval, "CVodeSetJacFn", 1)) return(1); /* In loop over output points: call CVode, print results, test for errors */ umax = N_VMaxNorm(u); PrintHeader(reltol, abstol, umax); for(iout=1, tout=T1; iout <= NOUT; iout++, tout += DTOUT) { retval = CVode(cvode_mem, tout, u, &t, CV_NORMAL); if(check_retval(&retval, "CVode", 1)) break; umax = N_VMaxNorm(u); retval = CVodeGetNumSteps(cvode_mem, &nst); check_retval(&retval, "CVodeGetNumSteps", 1); PrintOutput(t, umax, nst); } PrintFinalStats(cvode_mem); /* Print some final statistics */ printf("num_threads = %i\n\n", num_threads); N_VDestroy(u); /* Free the u vector */ CVodeFree(&cvode_mem); /* Free the integrator memory */ SUNLinSolFree(LS); /* Free the linear solver memory */ SUNMatDestroy(A); /* Free the matrix memory */ free(data); /* Free the user data */ return(0); } /* *------------------------------- * Functions called by the solver *------------------------------- */ /* f routine. Compute f(t,u). */ static int f(realtype t, N_Vector u,N_Vector udot, void *user_data) { realtype uij, udn, uup, ult, urt, hordc, horac, verdc, hdiff, hadv, vdiff; realtype *udata, *dudata; sunindextype i, j; UserData data; i = j = 0; udata = NV_DATA_OMP(u); dudata = NV_DATA_OMP(udot); /* Extract needed constants from data */ data = (UserData) user_data; hordc = data->hdcoef; horac = data->hacoef; verdc = data->vdcoef; /* Loop over all grid points. */ #pragma omp parallel for default(shared) private(j, i, uij, udn, uup, ult, urt, hdiff, hadv, vdiff) num_threads(data->nthreads) for (j=1; j <= MY; j++) { for (i=1; i <= MX; i++) { /* Extract u at x_i, y_j and four neighboring points */ uij = IJth(udata, i, j); udn = (j == 1) ? ZERO : IJth(udata, i, j-1); uup = (j == MY) ? ZERO : IJth(udata, i, j+1); ult = (i == 1) ? ZERO : IJth(udata, i-1, j); urt = (i == MX) ? ZERO : IJth(udata, i+1, j); /* Set diffusion and advection terms and load into udot */ hdiff = hordc*(ult - TWO*uij + urt); hadv = horac*(urt - ult); vdiff = verdc*(uup - TWO*uij + udn); IJth(dudata, i, j) = hdiff + hadv + vdiff; } } return(0); } /* Jacobian routine. Compute J(t,u). 
*/ static int Jac(realtype t, N_Vector u, N_Vector fu, SUNMatrix J, void *user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3) { sunindextype i, j, k; realtype *kthCol, hordc, horac, verdc; UserData data; /* The components of f = udot that depend on u(i,j) are f(i,j), f(i-1,j), f(i+1,j), f(i,j-1), f(i,j+1), with df(i,j)/du(i,j) = -2 (1/dx^2 + 1/dy^2) df(i-1,j)/du(i,j) = 1/dx^2 + .25/dx (if i > 1) df(i+1,j)/du(i,j) = 1/dx^2 - .25/dx (if i < MX) df(i,j-1)/du(i,j) = 1/dy^2 (if j > 1) df(i,j+1)/du(i,j) = 1/dy^2 (if j < MY) */ i = j = 0; data = (UserData) user_data; hordc = data->hdcoef; horac = data->hacoef; verdc = data->vdcoef; #pragma omp parallel for collapse(2) default(shared) private(i, j, k, kthCol) num_threads(data->nthreads) for (j=1; j <= MY; j++) { for (i=1; i <= MX; i++) { k = j-1 + (i-1)*MY; kthCol = SUNBandMatrix_Column(J,k); /* set the kth column of J */ SM_COLUMN_ELEMENT_B(kthCol,k,k) = -TWO*(verdc+hordc); if (i != 1) SM_COLUMN_ELEMENT_B(kthCol,k-MY,k) = hordc + horac; if (i != MX) SM_COLUMN_ELEMENT_B(kthCol,k+MY,k) = hordc - horac; if (j != 1) SM_COLUMN_ELEMENT_B(kthCol,k-1,k) = verdc; if (j != MY) SM_COLUMN_ELEMENT_B(kthCol,k+1,k) = verdc; } } return(0); } /* *------------------------------- * Private helper functions *------------------------------- */ /* Set initial conditions in u vector */ static void SetIC(N_Vector u, UserData data) { sunindextype i, j; realtype x, y, dx, dy; realtype *udata; i = j = 0; /* Extract needed constants from data */ dx = data->dx; dy = data->dy; /* Set pointer to data array in vector u. */ udata = NV_DATA_OMP(u); /* Load initial profile into u vector */ #pragma omp parallel for default(shared) private(j, i, y, x) for (j=1; j <= MY; j++) { y = j*dy; for (i=1; i <= MX; i++) { x = i*dx; IJth(udata,i,j) = x*(XMAX - x)*y*(YMAX - y)*exp(FIVE*x*y); } } } /* Print first lines of output (problem description) */ static void PrintHeader(realtype reltol, realtype abstol, realtype umax) { printf("\n2-D Advection-Diffusion Equation\n"); printf("Mesh dimensions = %d X %d\n", MX, MY); printf("Total system size = %d\n", NEQ); #if defined(SUNDIALS_EXTENDED_PRECISION) printf("Tolerance parameters: reltol = %Lg abstol = %Lg\n\n", reltol, abstol); printf("At t = %Lg max.norm(u) =%14.6Le \n", T0, umax); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("Tolerance parameters: reltol = %g abstol = %g\n\n", reltol, abstol); printf("At t = %g max.norm(u) =%14.6e \n", T0, umax); #else printf("Tolerance parameters: reltol = %g abstol = %g\n\n", reltol, abstol); printf("At t = %g max.norm(u) =%14.6e \n", T0, umax); #endif return; } /* Print current value */ static void PrintOutput(realtype t, realtype umax, long int nst) { #if defined(SUNDIALS_EXTENDED_PRECISION) printf("At t = %4.2Lf max.norm(u) =%14.6Le nst = %4ld\n", t, umax, nst); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst); #else printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst); #endif return; } /* Get and print some final statistics */ static void PrintFinalStats(void *cvode_mem) { int retval; long int nst, nfe, nsetups, netf, nni, ncfn, nje, nfeLS; retval = CVodeGetNumSteps(cvode_mem, &nst); check_retval(&retval, "CVodeGetNumSteps", 1); retval = CVodeGetNumRhsEvals(cvode_mem, &nfe); check_retval(&retval, "CVodeGetNumRhsEvals", 1); retval = CVodeGetNumLinSolvSetups(cvode_mem, &nsetups); check_retval(&retval, "CVodeGetNumLinSolvSetups", 1); retval = CVodeGetNumErrTestFails(cvode_mem, &netf); check_retval(&retval, 
"CVodeGetNumErrTestFails", 1); retval = CVodeGetNumNonlinSolvIters(cvode_mem, &nni); check_retval(&retval, "CVodeGetNumNonlinSolvIters", 1); retval = CVodeGetNumNonlinSolvConvFails(cvode_mem, &ncfn); check_retval(&retval, "CVodeGetNumNonlinSolvConvFails", 1); retval = CVodeGetNumJacEvals(cvode_mem, &nje); check_retval(&retval, "CVodeGetNumJacEvals", 1); retval = CVodeGetNumLinRhsEvals(cvode_mem, &nfeLS); check_retval(&retval, "CVodeGetNumLinRhsEvals", 1); printf("\nFinal Statistics:\n"); printf("nst = %-6ld nfe = %-6ld nsetups = %-6ld nfeLS = %-6ld nje = %ld\n", nst, nfe, nsetups, nfeLS, nje); printf("nni = %-6ld ncfn = %-6ld netf = %ld\n", nni, ncfn, netf); return; } /* Check function return value... opt == 0 means SUNDIALS function allocates memory so check if returned NULL pointer opt == 1 means SUNDIALS function returns an integer value so check if retval < 0 opt == 2 means function allocates memory so check if returned NULL pointer */ static int check_retval(void *returnvalue, const char *funcname, int opt) { int *retval; /* Check if SUNDIALS function returned NULL pointer - no memory allocated */ if (opt == 0 && returnvalue == NULL) { fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return(1); } /* Check if retval < 0 */ else if (opt == 1) { retval = (int *) returnvalue; if (*retval < 0) { fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n", funcname, *retval); return(1); }} /* Check if function returned NULL pointer - no memory allocated */ else if (opt == 2 && returnvalue == NULL) { fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return(1); } return(0); }